From 4dcb11eab7bbeac866043d2a7c748b8d06fbd243 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Tue, 23 Apr 2024 18:45:43 +0800 Subject: [PATCH 01/34] add multimodal LLM BLIP-2 and InstructBLIP --- data/dataset_info.json | 7 +- data/llava_instruct_100.json | 4266 +++++++++++++++++++++++ examples/mllm/sft_blip2.sh | 34 + examples/mllm/sft_instructblip.sh | 35 + src/llmtuner/data/__init__.py | 4 +- src/llmtuner/data/loader.py | 50 +- src/llmtuner/hparams/data_args.py | 4 + src/llmtuner/hparams/finetuning_args.py | 2 +- src/llmtuner/hparams/model_args.py | 4 + src/llmtuner/model/__init__.py | 5 +- src/llmtuner/model/adapter.py | 114 +- src/llmtuner/model/loader.py | 116 +- src/llmtuner/train/sftmm/__init__.py | 3 + src/llmtuner/train/sftmm/collator.py | 69 + src/llmtuner/train/sftmm/metric.py | 61 + src/llmtuner/train/sftmm/trainer.py | 137 + src/llmtuner/train/sftmm/workflow.py | 105 + src/llmtuner/train/tuner.py | 5 +- 18 files changed, 4982 insertions(+), 39 deletions(-) create mode 100644 data/llava_instruct_100.json create mode 100644 examples/mllm/sft_blip2.sh create mode 100644 examples/mllm/sft_instructblip.sh create mode 100644 src/llmtuner/train/sftmm/__init__.py create mode 100644 src/llmtuner/train/sftmm/collator.py create mode 100644 src/llmtuner/train/sftmm/metric.py create mode 100644 src/llmtuner/train/sftmm/trainer.py create mode 100644 src/llmtuner/train/sftmm/workflow.py diff --git a/data/dataset_info.json b/data/dataset_info.json index e396ed50..bfda162f 100644 --- a/data/dataset_info.json +++ b/data/dataset_info.json @@ -340,7 +340,7 @@ "history": "history" } }, - "orca_dpo_de" : { + "orca_dpo_de": { "hf_hub_url": "mayflowergmbh/intel_orca_dpo_pairs_de", "ranking": true }, @@ -413,5 +413,10 @@ "prompt": "content" }, "folder": "python" + }, + "llava_instruct_100": { + "file_name": "llava_instruct_100.json", + "file_sha1": "96fa18313544e22444fe20eead7754b17da452ae", + "ranking": true } } \ No newline at end of file diff --git a/data/llava_instruct_100.json b/data/llava_instruct_100.json new file mode 100644 index 00000000..50779365 --- /dev/null +++ b/data/llava_instruct_100.json @@ -0,0 +1,4266 @@ +[ + { + "id": "000000379143", + "image": "000000379143.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the man doing in the back of the pickup truck?\n" + }, + { + "from": "gpt", + "value": "The man is sitting on top of piled objects or belongings loaded into the back of a pickup truck." + }, + { + "from": "human", + "value": "Is the man holding anything in his hands?" + }, + { + "from": "gpt", + "value": "Yes, the man is holding a beer in his hand while sitting on top of the objects in the back of the pickup truck." + }, + { + "from": "human", + "value": "What color is the pickup truck?" + }, + { + "from": "gpt", + "value": "The pickup truck is white." + }, + { + "from": "human", + "value": "Is the man sitting or standing?" + }, + { + "from": "gpt", + "value": "The man is sitting on top of the piled objects in the back of the pickup truck." + }, + { + "from": "human", + "value": "What could be the possible reasons for the man sitting on top of the possessions in the back of the pickup truck?" + }, + { + "from": "gpt", + "value": "There could be several reasons for the man sitting on top of his possessions in the back of the pickup truck:\n\n1. Moving: The man might be moving to a new location and needed to transport his items in a pickup truck, utilizing available space efficiently. 
By sitting on top of the belongings, he could be helping to stabilize and secure the items during the move, preventing them from falling or shifting during transportation.\n\n2. Lack of seating: If the cab of the pickup truck is already at full capacity or there isn't enough space for him to sit inside, the man may have chosen to sit on his possessions as an alternative seating arrangement.\n\n3. Road trip or outing: The man might be on a road trip or a casual outing with friends or family, where he is using the back of the pickup truck as an open-air seating area. By sitting on top of the loaded items, he may be enjoying the journey while savoring his beer.\n\n4. Keeping an eye on belongings: The man could be safeguarding his possessions by staying close to them, ensuring that no items are lost, stolen or damaged during the journey.\n\nRegardless of the specific reason, the image shows a person making the most of their situation, adding a touch of lightheartedness or adventure to an otherwise mundane scene." + } + ] + }, + { + "id": "000000083996", + "image": "000000083996.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many people are seen surfing in the image?" + }, + { + "from": "gpt", + "value": "The image shows several people or a group of people surfing the waves in the ocean." + }, + { + "from": "human", + "value": "What is the weather like in the image?" + }, + { + "from": "gpt", + "value": "The weather in the image is sunny." + }, + { + "from": "human", + "value": "What are the people doing in the water?" + }, + { + "from": "gpt", + "value": "The people in the water are surfing the waves on surfboards." + }, + { + "from": "human", + "value": "Can you explain the basics of surfing and the necessary equipment for beginners?" + }, + { + "from": "gpt", + "value": "Surfing is a popular water sport that involves riding waves on a surfboard while standing or lying in various positions. The main goal is to balance on the surfboard while being propelled forward by the wave's momentum. The sport originated from Polynesian culture and has evolved into a global phenomenon, attracting participants of all skill levels and backgrounds.\n\nFor beginners, there are a few essentials to get started with surfing:\n\n1. Surfboard: A beginner's surfboard should be wide, thick, and long to provide better stability and buoyancy. This makes it easier for beginners to learn balance and catch waves. Soft-top boards or foam boards are recommended for beginners, as they are more forgiving and less likely to cause injury during a wipeout.\n\n2. Wetsuit: A wetsuit is essential for most surfers, depending on the water temperature. Wetsuits are made of neoprene, providing insulation and flexibility to allow surfers to stay warm and move comfortably in the water. Thickness and type of wetsuit will vary based on the water temperature and local climate.\n\n3. Leash: A leash is a cord that attaches the surfboard to the surfer's ankle, preventing the board from drifting away after a wipeout. This is an essential safety tool and ensures that the surfboard stays within reach at all times.\n\n4. Surf Wax or Traction Pad: Surf wax or a traction pad is applied to the surfboard deck to provide grip and prevent surfers from slipping off their boards. Wax is applied in a circular pattern, while traction pads are adhesive pads placed on the deck.\n\n5. 
Sun Protection: Surfers should wear sunscreen, sunglasses, and, in some cases, a hat or rash guard to protect their skin from the sun's harmful rays.\n\nBefore hitting the waves, beginners should take a few lessons with an experienced instructor to learn essential techniques, safety tips, and surfing etiquette. With the right equipment and proper training, anyone can experience the thrill of catching and riding waves in the ocean." + } + ] + }, + { + "id": "000000091544", + "image": "000000091544.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many people are in the image?" + }, + { + "from": "gpt", + "value": "There are two people in the image." + }, + { + "from": "human", + "value": "What type of clothing are the people wearing in the image?" + }, + { + "from": "gpt", + "value": "The people in the image are wearing jackets or coats, possibly leather or black in color." + }, + { + "from": "human", + "value": "What activity are the two people engaged in?" + }, + { + "from": "gpt", + "value": "The two people are walking next to each other on a street, likely during the day." + }, + { + "from": "human", + "value": "Is there a traffic light visible in the image?" + }, + { + "from": "gpt", + "value": "Yes, there is a yellow traffic light visible in the image." + }, + { + "from": "human", + "value": "Can you tell if the two people are walking close together or maintaining some distance?" + }, + { + "from": "gpt", + "value": "The image shows the two people walking close together, as seen in the close-up view of them." + } + ] + }, + { + "id": "000000148540", + "image": "000000148540.jpg", + "conversations": [ + { + "from": "human", + "value": "\nAre people getting off a train or a bus in the image?" + }, + { + "from": "gpt", + "value": "People are getting off a train in the image." + }, + { + "from": "human", + "value": "Can you describe the colors of the train?" + }, + { + "from": "gpt", + "value": "The train is blue and pink in color." + }, + { + "from": "human", + "value": "What is happening at the platform when the train stops?" + }, + { + "from": "gpt", + "value": "When the train stops at the platform, the door opens, and people start to exit the train. Among them, there is a couple carrying their luggage." + }, + { + "from": "human", + "value": "Is there any luggage seen on the platform?" + }, + { + "from": "gpt", + "value": "Yes, there is a suitcase on the ground near a pole on the platform." + }, + { + "from": "human", + "value": "How many suitcases does the couple carry?" + }, + { + "from": "gpt", + "value": "Unfortunately, the image does not explicitly show the exact number of suitcases the couple is carrying." + } + ] + }, + { + "id": "000000257752", + "image": "000000257752.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow is the teddy bear positioned in the image?" + }, + { + "from": "gpt", + "value": "The teddy bear is hanging by a string or thread in the image." + }, + { + "from": "human", + "value": "What is happening to the teddy bear in terms of the surrounding elements?" + }, + { + "from": "gpt", + "value": "Water droplets are falling on the teddy bear as it's hanging by the string." + }, + { + "from": "human", + "value": "What is the background like in the image?" + }, + { + "from": "gpt", + "value": "The background of the image features a dirt wall, which could indicate an outdoor or cave-like setting." + }, + { + "from": "human", + "value": "Could you describe a possible scenario or story behind this image?" 
+ }, + { + "from": "gpt", + "value": "The image of the teddy bear hanging by a string and having water droplets fall on it in front of a dirt wall creates an intriguing and somewhat mysterious scene. One possible interpretation of this image could be an art installation or a creative photography concept, focusing on the juxtaposition of a soft, innocent teddy bear with the harsh, natural elements of water and earth.\n\nAnother possible scenario could be that the teddy bear represents an abandoned or lost toy, left to the elements and the passage of time. The hanging position of the teddy bear and the falling water droplets might symbolize feelings of loneliness, vulnerability, or helplessness. \n\nIn either case, the image's composition and subject evoke a sense of curiosity and may lead the viewer to wonder about the intentions, emotions, or stories behind the scene. The image thus becomes a visual metaphor or prompt for creating and exploring narratives within the context of the photograph." + } + ] + }, + { + "id": "000000426016", + "image": "000000426016.jpg", + "conversations": [ + { + "from": "human", + "value": "How many orange parking meters are there in the image?\n" + }, + { + "from": "gpt", + "value": "There are two orange parking meters in the image." + }, + { + "from": "human", + "value": "What is the color of the parking meters?" + }, + { + "from": "gpt", + "value": "The parking meters are orange." + }, + { + "from": "human", + "value": "Where are the parking meters in relation to trees and bushes?" + }, + { + "from": "gpt", + "value": "The parking meters are positioned next to a tree and near some bushes." + }, + { + "from": "human", + "value": "Is there any caution tape on the parking meter?" + }, + { + "from": "gpt", + "value": "Yes, there is caution tape wrapped around the parking meter, possibly indicating that it is out of order or needs attention." + } + ] + }, + { + "id": "000000242544", + "image": "000000242544.jpg", + "conversations": [ + { + "from": "human", + "value": "What color is the horse in the image?\n" + }, + { + "from": "gpt", + "value": "The horse in the image is brown." + }, + { + "from": "human", + "value": "Is the horse stationary or in motion?" + }, + { + "from": "gpt", + "value": "The horse is in motion, walking through the field." + }, + { + "from": "human", + "value": "What is the woman doing in the image?" + }, + { + "from": "gpt", + "value": "The woman is standing in front of the brown horse and looking at it." + }, + { + "from": "human", + "value": "What is the setting of the image?" + }, + { + "from": "gpt", + "value": "The setting of the image is a field, where the brown horse is walking, and the woman is standing in front of it observing the horse." + } + ] + }, + { + "id": "000000238511", + "image": "000000238511.jpg", + "conversations": [ + { + "from": "human", + "value": "How many giraffes are in the image?\n" + }, + { + "from": "gpt", + "value": "There are four giraffes in the image." + }, + { + "from": "human", + "value": "What is the terrain like in the image?" + }, + { + "from": "gpt", + "value": "The terrain in the image is grassy, either resembling a grassy field or a grassy hill." + }, + { + "from": "human", + "value": "What is the relationship among the giraffes in the image?" + }, + { + "from": "gpt", + "value": "The giraffes are standing near each other, forming a group in the grassy area." + }, + { + "from": "human", + "value": "What is the significance or importance of giraffes living in groups?" 
+ }, + { + "from": "gpt", + "value": "Giraffes living in groups, also known as tower or a journey, is an essential aspect of their social structure and behavior. Being social animals, their group dynamics offer them several advantages – both in terms of survival and maintaining their well-being. Some key benefits of giraffes living in groups include:\n\n1. Protection: Staying in groups helps giraffes protect themselves and their offspring from predators, such as lions and hyenas. By maintaining a collective vigilance, they can quickly identify and respond to potential threats, alerting other members of the group and helping to protect each other.\n\n2. Improved foraging: Living in groups enables giraffes to locate and access food resources more efficiently, as they can share information about the location and quality of food sources. This can be particularly beneficial during periods of food scarcity, which characterizes their arid and semi-arid habitats.\n\n3. Social interaction: For giraffes, taking part in social activities and forming bonds with other group members is essential for their emotional well-being. This includes activities such as grooming, playing, and engaging in mock fights, which help giraffes establish and maintain strong relationships, learn essential survival skills, and communicate with one another.\n\n4. Reproduction: Living in groups facilitates breeding activity among giraffes, as young, sexually mature individuals can easily find prospective mating partners. This not only ensures the continuity and propagation of their species but also contributes to maintaining genetic diversity within the population.\n\nIn summary, the group structure of giraffes is vital for their survival, well-being, and thriving in their natural habitats. By living in groups, they can better protect themselves, locate and share resources, engage in social interaction and bonding, and reproduce to sustain and strengthen their population." + } + ] + }, + { + "id": "000000284454", + "image": "000000284454.jpg", + "conversations": [ + { + "from": "human", + "value": "\nIs the street mostly empty or are there cars on it?" + }, + { + "from": "gpt", + "value": "The street is mostly empty, with only a few cars on it." + }, + { + "from": "human", + "value": "What type of traffic control device is visible in the image?" + }, + { + "from": "gpt", + "value": "An unlit stoplight is visible at the corner of the street in the image." + }, + { + "from": "human", + "value": "Are there people standing on the corners of the intersection?" + }, + { + "from": "gpt", + "value": "No, there are no people standing on the corners of the intersection in the image." + }, + { + "from": "human", + "value": "What is the condition of the vegetation along the street?" + }, + { + "from": "gpt", + "value": "The street is lined with lush green trees, indicating healthy and well-maintained vegetation." + }, + { + "from": "human", + "value": "Why might an unlit stoplight be present on an empty street?" + }, + { + "from": "gpt", + "value": "An unlit stoplight could be present on an empty street due to a few reasons. It may indicate that the traffic light is malfunctioning or temporarily turned off for repair or maintenance work. 
Alternatively, it could be a time of day when traffic in the area is minimal, and the local traffic management authority might have set the stoplight to remain unlit to save energy.\n\nIt is also possible that the photo was taken in an area where traffic is typically low, and the stoplight serves as a cautionary measure rather than active traffic control. The presence of few cars on the street suggests that it is not a heavily trafficked area, which might contribute to the decision of the local traffic authority to keep the stoplight unlit during less busy periods.\n\nNonetheless, the unlit stoplight contributes to the calm and uncrowded ambiance depicted in the image, as it hints at the lack of congestion and vehicle-related noise pollution commonly found in urban environments. The lush green trees lining the street further enhance the serene impression created by the mostly empty street and its unlit stoplight." + } + ] + }, + { + "id": "000000581674", + "image": "000000581674.jpg", + "conversations": [ + { + "from": "human", + "value": "How many kitchen workers are visible in the image?\n" + }, + { + "from": "gpt", + "value": "There are several kitchen workers visible in the image, which indicates that there is a team of chefs working together in the commercial kitchen." + }, + { + "from": "human", + "value": "What are the chefs doing in the kitchen?" + }, + { + "from": "gpt", + "value": "The chefs are busy preparing dishes and cooking in the commercial kitchen, gearing up for the upcoming dinner service." + }, + { + "from": "human", + "value": "What kind of kitchen is shown in the image: a household kitchen, a commercial kitchen, or a small kitchen?" + }, + { + "from": "gpt", + "value": "The image shows a commercial kitchen, where a team of chefs is working together to prepare dishes and cook for their patrons." + }, + { + "from": "human", + "value": "What are some common characteristics and layout of a commercial kitchen?" + }, + { + "from": "gpt", + "value": "Commercial kitchens are designed to handle high-volume food production and are typically found in restaurants, hotels, catering services, and other food service establishments. Common characteristics and layouts of commercial kitchens include:\n\n1. Space: A commercial kitchen is usually larger than a household kitchen to accommodate multiple chefs and staff working together, as well as the required equipment for large-scale food production.\n\n2. Zoning and Workflow: One of the crucial aspects of planning a commercial kitchen is to create zones and establish a proper workflow for efficient operation. These zones often include specific areas for food preparation, cooking, plating, cleaning, and storage. This organization allows the staff to work more efficiently and minimize cross-contamination risks.\n\n3. Equipment: Commercial kitchens are equipped with professional-grade appliances and tools, such as ranges, ovens, grills, fryers, refrigeration units, and stainless steel worktables. This heavy-duty equipment is designed for continuous, high-volume use and is built to withstand the rigors of the commercial kitchen environment.\n\n4. Ventilation: Proper ventilation is essential in a commercial kitchen, both for maintaining air quality and for removing excess heat, steam, and cooking odors. A well-designed ventilation system typically includes exhaust hoods, air supply fans, ducts, and air purification components.\n\n5. 
Safety and Hygiene: In a commercial kitchen, various local and national regulations govern food safety, sanitation, and hygiene practices. Some common features that contribute to a safe and sanitary kitchen include non-slip flooring, easily cleanable surfaces, designated handwashing stations, and properly stored chemicals and cleaning supplies. \n\nOverall, a commercial kitchen is a specialized working environment designed for high-volume food production and requires careful planning, sturdy equipment, efficient workflow management, and strict adherence to safety and hygiene standards." + } + ] + }, + { + "id": "000000549117", + "image": "000000549117.jpg", + "conversations": [ + { + "from": "human", + "value": "How many trays of doughnuts are visible in the image?\n" + }, + { + "from": "gpt", + "value": "There are four trays of doughnuts visible in the image." + }, + { + "from": "human", + "value": "What is the setting of this image?" + }, + { + "from": "gpt", + "value": "The setting of this image is a doughnut shop with a display case showcasing a large assortment of different types of doughnuts on trays." + }, + { + "from": "human", + "value": "Are the doughnuts on display all the same or are there different varieties?" + }, + { + "from": "gpt", + "value": "The doughnuts on display come in a variety of different types and flavors, showcasing a large assortment to choose from." + }, + { + "from": "human", + "value": "What can you say about the presentation of the doughnuts?" + }, + { + "from": "gpt", + "value": "The doughnuts are presented neatly on trays in a display case, making it visually appealing and easy for customers to view the variety of options available for purchase. The organized arrangement of the doughnuts also helps customers to pick and choose their desired treats based on their preferences for flavors and textures." + }, + { + "from": "human", + "value": "What factors might influence a customer's decision when choosing doughnuts from this display?" + }, + { + "from": "gpt", + "value": "When choosing doughnuts from this display, a customer's decision may be influenced by several factors:\n\n1. Visual appeal: The appearance of the doughnuts can play a significant role in attracting customers to try different flavors or designs. Customers might be drawn to doughnuts with unique toppings, vibrant colors, or interesting shapes.\n\n2. Variety: With a wide assortment of different doughnuts available, customers may be tempted to try new or unfamiliar flavors, or stick to their personal favorites.\n\n3. Personal preference: Customers often have personal tastes that dictate their choices—some may prefer doughnuts with fruit fillings or glazes, while others might enjoy cream-filled or chocolate-covered doughnuts.\n\n4. Dietary restrictions or considerations: Some customers may need to take into account factors such as allergies or dietary preferences, like avoiding gluten or opting for plant-based options.\n\n5. Sensitivity to taste or texture: The range of doughnuts available might cater to customers who prefer soft and fluffy doughnuts or those who like crispy and dense doughnuts.\n\nIn summary, when choosing doughnuts from this diverse display, customers may consider factors such as visual appeal, variety, personal preferences, dietary restrictions and considerations, as well as their sensitivity to taste or texture. The unique array of doughnuts showcased in the store allows customers to find options that cater to their individual tastes or even explore new flavors." 
+ } + ] + }, + { + "id": "000000506832", + "image": "000000506832.jpg", + "conversations": [ + { + "from": "human", + "value": "What color are the boats in the image?\n" + }, + { + "from": "gpt", + "value": "The boats in the image are blue." + }, + { + "from": "human", + "value": "How are the boats positioned in the body of water?" + }, + { + "from": "gpt", + "value": "The boats are tied together, floating in a body of water, and they appear to be docked or stored in a storage type area." + }, + { + "from": "human", + "value": "What are the boats made of?" + }, + { + "from": "gpt", + "value": "The boats are made of wood." + }, + { + "from": "human", + "value": "Are the boats unique or identical in design?" + }, + { + "from": "gpt", + "value": "The boats are identical in design, suggesting that they serve a common purpose or belong to a specific organization." + }, + { + "from": "human", + "value": "What could be the reason for so many boats being tied together in this manner?" + }, + { + "from": "gpt", + "value": "There could be several reasons why so many boats are tied together in this manner:\n\n1. Storage: The boats may be stored together when not in use, creating a more organized and efficient storage system, as well as making it easier to perform maintenance and prepare the boats for use.\n\n2. Rental Services: If these boats are part of a rental service, tying them together allows for easier management and supervision of the rental inventory. It is also convenient for customers, as they can easily choose a boat from the available selection.\n\n3. Training or Group Activities: In the case of training sessions or group activities, having many identical boats tied together allows for easy access, distribution, and instruction of participants, ensuring a consistent experience for everyone involved.\n\n4. Protecting the Boats: Tying boats together can provide additional stability and protection against potential damage from water currents, winds, or collisions with other vessels.\n\nIn summary, there could be various reasons for so many blue boats being tied together, including storage, rental services, training/group activities, or protecting them from potential damage in the water." + } + ] + }, + { + "id": "000000440389", + "image": "000000440389.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the baseball player doing in the image?\n" + }, + { + "from": "gpt", + "value": "The baseball player in the image is swinging a baseball bat, attempting to hit a baseball pitch during a game." + }, + { + "from": "human", + "value": "What is the general setting of the image?" + }, + { + "from": "gpt", + "value": "The general setting of the image is a baseball game, where the baseball player is at bat and trying to hit the pitch thrown by the opposing team's pitcher." + }, + { + "from": "human", + "value": "What is the batter's position when swinging at the pitch?" + }, + { + "from": "gpt", + "value": "The batter is taking a low and close swing at the baseball pitch, indicating that they're trying to hit a low-lying ball during the game." + }, + { + "from": "human", + "value": "What specific skills are required for a batter to successfully hit a baseball pitch?" + }, + { + "from": "gpt", + "value": "Several specific skills are required for a batter to successfully hit a baseball pitch. Some of these skills include:\n\n1. 
Hand-eye coordination: Hitting a baseball requires exceptional hand-eye coordination to accurately judge the trajectory and speed of the incoming pitch, and adjust the swing accordingly.\n\n2. Timing: Precise timing is essential for a batter, as it determines when to initiate the swing to make contact with the ball at the ideal moment, resulting in a solid hit.\n\n3. Bat speed: Generating bat speed is crucial for hitting the ball with enough force to send it far into the field. A quicker swing can offer more power and distance on the hit.\n\n4. Body mechanics: Proper body mechanics ensure an efficient, fluid swing movement that uses the batter's entire body. This includes weight transfer, hip rotation, and upper body torque, which all contribute to generating power in the swing.\n\n5. Pitch recognition: A successful batter must be able to recognize different types of pitches (e.g., fastballs, curveballs, sliders, or changeups) and quickly adjust their swing approach accordingly.\n\n6. Plate discipline: Good plate discipline involves being selective about which pitches to swing at, refraining from swinging at bad pitches, and knowing when to take a pitch or when to attempt a hit.\n\n7. Mental toughness: Baseball is a game of failure, and it is essential for batters to maintain a strong, positive mindset, learning from their mistakes and staying focused on each individual at-bat.\n\nIn summary, a batter's success in hitting a baseball pitch relies on a combination of essential skills, like hand-eye coordination, timing, bat speed, proper body mechanics, pitch recognition, plate discipline, and mental toughness. These skills help ensure a solid and efficient swing, increasing the chances of making contact with the ball and ultimately optimizing their performance at the plate." + } + ] + }, + { + "id": "000000562138", + "image": "000000562138.jpg", + "conversations": [ + { + "from": "human", + "value": "What colors are on the bus in the image?\n" + }, + { + "from": "gpt", + "value": "The bus in the image has white and red colors." + }, + { + "from": "human", + "value": "How is the traffic situation on the street where the bus is driving?" + }, + { + "from": "gpt", + "value": "The traffic situation on the street has a group of cars, and the bus seems to be driving ahead of them. This suggests that there is at least some level of traffic, but it's not possible to determine the exact severity of the congestion from the image alone." + }, + { + "from": "human", + "value": "What type of road is the bus driving on?" + }, + { + "from": "gpt", + "value": "The bus is driving on a street. However, the specifics like the number of lanes or the type of street (residential or commercial) cannot be determined from the image alone." + }, + { + "from": "human", + "value": "Is there any notable location around the bus?" + }, + { + "from": "gpt", + "value": "Yes, the bus is traveling down a street that is located near a park." + }, + { + "from": "human", + "value": "What mode of transportation is the main focus of the image?" + }, + { + "from": "gpt", + "value": "The main focus of the image is a bus driving down a street in front of several cars." + } + ] + }, + { + "id": "000000523914", + "image": "000000523914.jpg", + "conversations": [ + { + "from": "human", + "value": "What are the main components of the building in the image?\n" + }, + { + "from": "gpt", + "value": "The main components of the building in the image are a large brick building with a tower and a clock on it. 
The clock is located on the tower, which is next to or part of the building." + }, + { + "from": "human", + "value": "What is the color of the sky in the image?" + }, + { + "from": "gpt", + "value": "The color of the sky in the image is a beautiful blue." + }, + { + "from": "human", + "value": "What material is the building made of?" + }, + { + "from": "gpt", + "value": "The building is made of brick." + }, + { + "from": "human", + "value": "Can you describe the design and architecture of the building with the tower clock?" + }, + { + "from": "gpt", + "value": "Based on the image, the building appears to be a large brick structure with a sizable tower that houses a clock. The presence of a tower clock indicates that the architecture of the building might be influenced by classical or historical styles, as tower clocks are often associated with older structures or landmarks.\n\nThe brick material used for the construction of the building suggests durability and a sense of timelessness, which further emphasizes the traditional and historical aesthetic. The design might also include other architectural features such as archways or decorative elements which are typical of classical building styles.\n\nIn addition to the clock, the tower itself is likely to be a prominent element of the building's architecture. Its distinct presence adds a sense of height and grandeur to the structure, making it both visually appealing and potentially a local point of interest. The tower and clock also serve a practical purpose, providing accurate timekeeping for the surrounding area and enhancing the building's overall functionality.\n\nOverall, the building with the tower clock in the image reflects an architectural style that embraces tradition, historical influences, and a sense of timelessness. The combination of brick material, the tower, and the clock make for an imposing and visually striking structure against the beautiful blue sky background." + } + ] + }, + { + "id": "000000050706", + "image": "000000050706.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the person doing with the blender on the counter?" + }, + { + "from": "gpt", + "value": "The person is using the blender on the tile counter, specifically pressing a button to operate it. It contains water inside, which is being blended." + }, + { + "from": "human", + "value": "What is the surface material of the counter where the blender is placed?" + }, + { + "from": "gpt", + "value": "The counter surface is made of tiles." + }, + { + "from": "human", + "value": "What is inside the blender?" + }, + { + "from": "gpt", + "value": "There is water inside the blender." + }, + { + "from": "human", + "value": "Is the blender in use, or is it just sitting on the counter?" + }, + { + "from": "gpt", + "value": "The blender is in use, as someone is pressing a button on it to blend the water inside." + } + ] + }, + { + "id": "000000132303", + "image": "000000132303.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many zebras are visible in the image?" + }, + { + "from": "gpt", + "value": "There are six zebras visible in the image." + }, + { + "from": "human", + "value": "What are the zebras doing in the image?" + }, + { + "from": "gpt", + "value": "The zebras are grazing on the grass at the side of a dirt road, enjoying their natural environment." + }, + { + "from": "human", + "value": "What type of road are the zebras located near?" + }, + { + "from": "gpt", + "value": "The zebras are located near a dirt road." 
+ }, + { + "from": "human", + "value": "Are the zebras standing on top of the road or beside it?" + }, + { + "from": "gpt", + "value": "The zebras are standing beside the dirt road while grazing on the grass." + }, + { + "from": "human", + "value": "Can you describe a zebra's habitat and some characteristics of their natural environment?" + }, + { + "from": "gpt", + "value": "Zebras are native to Africa and inhabit a variety of ecosystems, including grasslands, savannas, woodlands, and even mountainous regions. Their natural environment consists of vast, open spaces with plenty of grass for grazing and an adequate water supply. Zebras are herbivorous animals, and their diet mainly consists of grass, but they can also consume leaves, shrubs, and other vegetation if necessary.\n\nOne of the key elements of a zebra's habitat is the availability of grass for grazing. Grasslands and savannas, which are among the most common habitats for zebras, offer an abundant supply of food. These ecosystems typically have a warm to hot climate, with alternating wet and dry seasons. During wet seasons, the grass grows rapidly, providing ample food for the zebras. The dry season brings a scarcity of food, prompting the zebras to move in search of better grazing opportunities.\n\nAnother essential aspect of their habitat is access to water sources. Zebras require a regular supply of water to maintain their health and well-being. In their natural environment, they can be found near rivers, lakes, or waterholes, which provide hydration and support their physiological needs.\n\nSocial structure is also an important aspect of a zebra's life. They live in groups called \"harems\" or \"herds,\" consisting of one stallion (male zebra) and a group of mares (female zebras) with their offspring. This social structure provides safety, as zebras rely on the protection and strength of the group to fend off potential predators, such as lions, hyenas, and cheetahs.\n\nIn summary, a zebra's natural environment includes open grasslands, savannas, or woodlands, providing ample food for grazing and nearby water sources for hydration. Their social structure includes living in harems or herds, which offer safety and protection against potential threats in their ecosystem." + } + ] + }, + { + "id": "000000246909", + "image": "000000246909.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat pieces of furniture can be seen in the bathroom?" + }, + { + "from": "gpt", + "value": "In the bathroom, you can see a vanity sink, a toilet, and a mirror." + }, + { + "from": "human", + "value": "What is the color of the bathroom furniture?" + }, + { + "from": "gpt", + "value": "The bathroom furniture is all white." + }, + { + "from": "human", + "value": "Is there a window in the bathroom?" + }, + { + "from": "gpt", + "value": "Yes, there is a window in the bathroom." + }, + { + "from": "human", + "value": "What type of flooring does the bathroom have?" + }, + { + "from": "gpt", + "value": "The bathroom has a tiled floor." + }, + { + "from": "human", + "value": "How would you describe the overall appearance of the bathroom?" + }, + { + "from": "gpt", + "value": "The overall appearance of the bathroom can be described as clean, plain, and well-organized, with all-white furniture and a tiled floor. The presence of the window provides some natural light and ventilation, contributing to the clean and fresh atmosphere. 
The bathroom contains essential items, such as a vanity sink, toilet, and mirror, making it functional and practical for everyday use." + } + ] + }, + { + "id": "000000163030", + "image": "000000163030.jpg", + "conversations": [ + { + "from": "human", + "value": "What type of animal is seen in the image?\n" + }, + { + "from": "gpt", + "value": "The image shows a cow." + }, + { + "from": "human", + "value": "What is the cow doing in the image?" + }, + { + "from": "gpt", + "value": "The cow is rummaging and eating garbage on the side of a road." + }, + { + "from": "human", + "value": "What kind of structure is near the cow?" + }, + { + "from": "gpt", + "value": "There is a brick building near the cow." + }, + { + "from": "human", + "value": "What other objects or transportation can be found in this image?" + }, + { + "from": "gpt", + "value": "In the image, there is a parked scooter under a bridge near the cow." + }, + { + "from": "human", + "value": "What could be the possible implications of the cow's actions in the image, and what might be done to address the issue?" + }, + { + "from": "gpt", + "value": "The cow rummaging and eating garbage on the side of the road suggests a lack of proper waste management in the area, resulting in garbage being dumped in public spaces. This not only poses health risks for animals like the cow as they consume potentially harmful materials, but it also has a negative impact on the environment and the aesthetics of the location.\n\nTo address this issue, it is crucial to implement proper waste management practices, such as providing community members with accessible means for disposing their garbage in a legal and environmentally friendly manner. This could include measures like installing designated trash bins at appropriate intervals, offering regular waste collection services, and promoting waste reduction and recycling initiatives.\n\nMoreover, public awareness campaigns should be carried out to educate the local population about the adverse effects of improper waste disposal on animal health, the environment, and the overall well-being of the community. Additionally, local authorities may need to enforce strict penalties for those caught dumping trash illegally, as a means to discourage such practices.\n\nIn terms of ensuring the welfare of animals like the cow, local animal protection groups or government bodies may need to step in to rescue and relocate them to more suitable environments, where they can acquire appropriate food and care. With cooperation from the community and local authorities, these efforts can lead to better health for animals, a cleaner environment, and an overall improved quality of life for residents." + } + ] + }, + { + "id": "000000048339", + "image": "000000048339.jpg", + "conversations": [ + { + "from": "human", + "value": "What activity are the people in the field participating in?\n" + }, + { + "from": "gpt", + "value": "The people in the field are participating in flying kites." + }, + { + "from": "human", + "value": "What is the condition of the sky in the image?" + }, + { + "from": "gpt", + "value": "The sky is blue and clear, which makes it a suitable day for kite flying." + }, + { + "from": "human", + "value": "Are the kites flying at a lower altitude, near trees, or at a higher altitude above the trees?" + }, + { + "from": "gpt", + "value": "The kites are flying at a higher altitude, above the trees." + }, + { + "from": "human", + "value": "How would you describe the location where the people are flying kites?" 
+ }, + { + "from": "gpt", + "value": "The people are flying kites in a large open field or a park, which provides ample space for this outdoor activity." + }, + { + "from": "human", + "value": "Why is kite flying a popular outdoor activity, and what characteristics make an ideal day for it?" + }, + { + "from": "gpt", + "value": "Kite flying is a popular outdoor activity for several reasons:\n\n1. It is accessible to people of all ages and requires minimal equipment, which makes it an inclusive and affordable pastime. Participants can either buy a pre-made kite or make their own with simple materials.\n\n2. Flying a kite promotes physical activity, as participants usually have to keep moving to keep their kites aloft, thereby increasing their level of exercise.\n\n3. Kite flying can also be a social event, allowing people to bond with family and friends or interact with fellow kite flyers in the park or field. It encourages communication, cooperation, and friendly competition.\n\n4. This activity can also be calming and meditative for some people. The act of flying a kite requires focus, which can help individuals disconnect from daily stresses and immerse themselves in the moment.\n\nIdeal conditions for a kite flying day include:\n\n1. Adequate wind: Light to moderate wind is essential for successful kite flying. Too little wind makes it difficult for the kite to be lifted up and stay aloft, while too much wind can cause the kite to become uncontrollable or crash.\n\n2. Clear skies: A sunny, blue sky enhances the kite flying experience, as the colors and patterns of the kite can be better appreciated. Also, a clear sky usually indicates fair weather, which is more comfortable for participants.\n\n3. Open space: Ideal kite flying locations include large fields, parks, or beaches with ample space for the kites to take off and fly without the risk of getting tangled in trees, power lines, or other obstacles.\n\nIn summary, kite flying is a popular outdoor activity due to its accessibility, the physical and social benefits it offers, and its therapeutic nature. A day with clear, blue skies, light to moderate wind, and an open space like a park or field is ideal for this enjoyable pastime." + } + ] + }, + { + "id": "000000545053", + "image": "000000545053.jpg", + "conversations": [ + { + "from": "human", + "value": "Is there a boat in the image? If so, where is it?\n" + }, + { + "from": "gpt", + "value": "Yes, there is a large boat in the image, and it is parked next to a white pier or dock on the water." + }, + { + "from": "human", + "value": "What is the color of the pier in the image?" + }, + { + "from": "gpt", + "value": "The color of the pier in the image is white." + }, + { + "from": "human", + "value": "What is the weather like in the image, and how can you tell?" + }, + { + "from": "gpt", + "value": "The weather in the image appears to be cloudy, possibly indicating an oncoming storm. This can be observed by the clouds in the sky and the reflection of the cloudy sky in the waters beneath the white dock. There might also be a sunset taking place." + }, + { + "from": "human", + "value": "What elements can be seen in the sky and on the water?" + }, + { + "from": "gpt", + "value": "In the sky, there are clouds that create a dramatic scene, possibly indicating an oncoming storm or the presence of a sunset during that time. On the water, you can see boats, including a large boat sitting at the dock, and the reflection of the cloudy sky." 
+ }, + { + "from": "human", + "value": "Are there any buildings or structures visible near the dock?" + }, + { + "from": "gpt", + "value": "Yes, there is a building near the dock, which can be seen alongside the boats and the water." + } + ] + }, + { + "id": "000000461543", + "image": "000000461543.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many skateboarders are in the image?" + }, + { + "from": "gpt", + "value": "There are three skateboarders in the image." + }, + { + "from": "human", + "value": "What are the other two skateboarders doing while one is performing a jump?" + }, + { + "from": "gpt", + "value": "The other two skateboarders are watching the skateboarder who is performing the jump, likely observing his trick and technique." + }, + { + "from": "human", + "value": "What is the skateboarder doing while being airborne?" + }, + { + "from": "gpt", + "value": "The airborne skateboarder is performing a trick, showcasing his skills and control of the skateboard as he jumps." + }, + { + "from": "human", + "value": "Where is this event taking place?" + }, + { + "from": "gpt", + "value": "The event is taking place at a skate park, which is a location specifically designed for skateboarding and other related activities." + } + ] + }, + { + "id": "000000553392", + "image": "000000553392.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many zebras are present in the image?" + }, + { + "from": "gpt", + "value": "There are three zebras present in the image." + }, + { + "from": "human", + "value": "What are the zebras doing in the image?" + }, + { + "from": "gpt", + "value": "In the image, a baby zebra is feeding from its mother while a third zebra is nearby. The baby zebra is also nuzzling the larger zebra, possibly as a form of interaction or bonding. The third zebra appears to be inspecting the other two as they stand together in the grassy area." + }, + { + "from": "human", + "value": "What is the setting of the image?" + }, + { + "from": "gpt", + "value": "The zebras are standing in a large, grassy field with tall grass surrounding them. This type of environment suggests a natural habitat where they can graze and interact with each other." + }, + { + "from": "human", + "value": "What color pattern do zebras have?" + }, + { + "from": "gpt", + "value": "Zebras have a distinctive black and white striped pattern. This pattern serves multiple purposes, such as camouflage, temperature regulation, and even deterring insects. The stripes can make it more challenging for predators like lions to single out individual zebras in a group when they are moving together." + }, + { + "from": "human", + "value": "Why is the baby zebra feeding from the mother?" + }, + { + "from": "gpt", + "value": "The baby zebra is feeding from its mother to obtain essential nutrients for growth and development. Much like other mammals, baby zebras rely on their mothers' milk during the initial stages of their life. This milk is rich in proteins, fats, and antibodies that help the young zebra build a strong immune system and gain strength for life in the wild.\n\nAs the baby zebra grows, it will gradually transition to an herbivorous diet, primarily consisting of grass. This dietary shift will take place over several months, during which the mother will continue to nurse her offspring while also teaching it essential skills needed for survival, such as finding food and water sources, as well as staying safe from predators. 
Feeding from the mother also provides an opportunity for bonding and nurturing between the mother and baby, fostering a strong attachment that ensures the young zebra's well-being and appropriate development." + } + ] + }, + { + "id": "000000512088", + "image": "000000512088.jpg", + "conversations": [ + { + "from": "human", + "value": "How is the lighting in the kitchen?\n" + }, + { + "from": "gpt", + "value": "The lighting in the kitchen is very dim, making it difficult to see all the details clearly." + }, + { + "from": "human", + "value": "What is the condition of the refrigerator in the image?" + }, + { + "from": "gpt", + "value": "The refrigerator in the image is knocked over, lying on its back. This indicates that there has been some sort of accident or damage in the kitchen." + }, + { + "from": "human", + "value": "What other appliances are visible in the kitchen?" + }, + { + "from": "gpt", + "value": "Apart from the refrigerator, there is an oven and a cooker present in the kitchen." + }, + { + "from": "human", + "value": "Considering the overturned refrigerator and other damage, what potential risks are there in such a situation?" + }, + { + "from": "gpt", + "value": "Having an overturned refrigerator and other damage in a kitchen poses several potential risks and hazards:\n\n1. Electrical hazards: With appliances like refrigerators, ovens, and cookers in the kitchen, they may still be connected to power, causing electrical hazards if the connections are compromised because of the damage. There's a possibility of electric shock and short-circuits, which can be dangerous for anyone present in the kitchen.\n\n2. Gas leaks: If the cooker is a gas-powered stove, the damage to the kitchen may cause gas leaks, putting people at risk of respiratory issues, fires, or even explosions if left unnoticed or unaddressed.\n\n3. Injuries: The overturned refrigerator and other damaged items can pose a risk of physical injuries for anyone trying to use the kitchen. The sharp edges and heavy objects could lead to cuts, bruises, or worse.\n\n4. Spoiled food: The refrigerator knocked over implies that the fridge has been out of commission for some time, causing the food stored to spoil. Consuming spoiled food can lead to food poisoning or other health issues.\n\n5. Pest infestation: The presence of spoiled food and possible messes created by the damage may attract pests, such as rodents and insects, which can pose hygiene issues and further aggravate the situation in the kitchen.\n\nTo address these risks, it's crucial to identify and fix the causes of damage, disconnect the power and gas supply to the affected appliances, and clean up any spills or debris. Care should be taken when handling the damaged appliances to avoid injuries. Finally, proper maintenance and a thorough inspection of the kitchen should be carried out to prevent any future accidents or damages." + } + ] + }, + { + "id": "000000206444", + "image": "000000206444.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the location of the boat in the image?\n" + }, + { + "from": "gpt", + "value": "The boat in the image is located on a river or canal, either docked at a pier or floating near the dock, depending on the specific scene being observed." + }, + { + "from": "human", + "value": "What type of environment is the boat surrounded by?" + }, + { + "from": "gpt", + "value": "The boat is surrounded by a natural environment with many trees alongside the river or canal." 
+ }, + { + "from": "human", + "value": "Is the boat moving or stationary in the image?" + }, + { + "from": "gpt", + "value": "It is not possible to definitively determine from the image whether the boat is moving or stationary. However, we can see that the boat is either docked at a pier or floating near the dock, which implies that it is either stationary or moving at a slow pace." + }, + { + "from": "human", + "value": "Is there any specific symbol, like a cross, visible on the boat?" + }, + { + "from": "gpt", + "value": "Yes, in one of the described scenes, there is a cross on the boat, indicating that it might have a religious or cultural significance, or it could be the boat's unique design." + }, + { + "from": "human", + "value": "Can you tell me more about boats typically found in canals or rivers?" + }, + { + "from": "gpt", + "value": "Boats that are commonly found on canals or rivers vary in type, size, and purpose. Some of the most common types of boats typically seen in these waterways include:\n\n1. Recreational Boats: These boats are designed for leisure activities and can range from small rowboats, kayaks, and canoes to larger vessels like cabin cruisers and pontoon boats. Recreational boats allow people to enjoy activities such as fishing, swimming, and sightseeing along the waterways.\n\n2. Narrowboats: Narrowboats, also known as canal boats or barges, are unique to the narrow canals found in some regions such as the United Kingdom. They are typically long, slender boats designed to navigate the tight spaces of these waterways, and they commonly serve as both transportation and accommodation, functioning as floating homes.\n\n3. Houseboats: Houseboats are larger boats that have been designed to serve as a primary residence or a vacation home. They are typically equipped with living spaces, including bedrooms, kitchens, and bathrooms, and they often remain docked for extended periods, with the option to move when the owner desires.\n\n4. Commercial Boats: Some boats on rivers and canals serve commercial purposes, such as cargo transportation, passenger transportation, or tourism. Examples include ferries, water taxis, and tour boats.\n\n5. Historic or Themed Boats: In some cases, boats found on canals and rivers are replicas or restored versions of historic vessels, like steam-powered paddleboats or sailboats. These boats often serve as tourist attractions, offering a glimpse into the history of the region or the specific type of boat.\n\nIn conclusion, boats found on canals and rivers can vary significantly in terms of their design, purpose, and size. Ranging from small recreational boats to large commercial vessels or historic themed boats, they contribute to the diversity and vibrancy of the waterways they navigate." + } + ] + }, + { + "id": "000000240511", + "image": "000000240511.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat objects are present in front of the house in the image?" + }, + { + "from": "gpt", + "value": "In front of the house, there is a pile of logs, a cut tree, and a refrigerator with a painting on its side. There is also a fence and trees nearby." + }, + { + "from": "human", + "value": "What type of house is shown in the image, and where is it possibly located?" + }, + { + "from": "gpt", + "value": "The image shows a house that appears to be characteristic of a New Orleans-style architecture. The presence of a cut tree and refrigerator in front of the house may indicate that the location is in New Orleans or a similar area." 
+ }, + { + "from": "human", + "value": "What is unique about the refrigerator?" + }, + { + "from": "gpt", + "value": "The refrigerator is unique because it has a painting on the side of it, making it a decorated outdoor refrigerator. This adds an artistic touch to the scene in front of the house." + }, + { + "from": "human", + "value": "Could you explain the possible significance of the refrigerator's decoration?" + }, + { + "from": "gpt", + "value": "The decorated refrigerator, along with its placement outdoors, might serve various purposes or make a statement. There could be multiple reasons for such decorations, and some possible explanations include:\n\n1. Artistic expression: The painting on the refrigerator's side may be a unique way for the homeowner or a local artist to express their creativity in an unconventional, attention-grabbing manner. It may also add to the overall aesthetic appeal of the house or the neighborhood.\n\n2. Celebrating the local culture: The decorated refrigerator might be a nod to the vibrant culture and artistic traditions of New Orleans or the local community. Such decorations might contribute to the city's unique character and charm.\n\n3. Statement piece: The outdoor refrigerator with its painting could be making a statement about a specific event, social issue, or even an environmental concern related to the local area. The presence of a cut tree suggests that there might have been a storm or some other event that prompted the display of the refrigerator alongside the debris.\n\n4. Repurposing: The homeowner might have chosen to repurpose the old refrigerator as an outdoor decoration, rather than discarding it or sending it to a landfill. It showcases creative reuse and may convey a message about repurposing items instead of contributing to waste.\n\nWithout further context or input from the people involved, it is difficult to pinpoint the exact motivation behind the refrigerator's decoration. However, it is evident that the painted refrigerator adds a unique element to the scene in front of the house and serves as a distinctive visual feature in the image." + } + ] + }, + { + "id": "000000469781", + "image": "000000469781.jpg", + "conversations": [ + { + "from": "human", + "value": "What type of truck is seen in the image?\n" + }, + { + "from": "gpt", + "value": "A work truck, likely belonging to a power company, is seen in the image." + }, + { + "from": "human", + "value": "What is the truck being used for?" + }, + { + "from": "gpt", + "value": "The truck is being used to lift a person into the air to work on power lines." + }, + { + "from": "human", + "value": "Is a ladder involved with the truck?" + }, + { + "from": "gpt", + "value": "Yes, there is a ladder on the truck that helps the person reach and work on the power lines." + }, + { + "from": "human", + "value": "What precautions are being taken around the work truck?" + }, + { + "from": "gpt", + "value": "Caution cones have been placed on the street around the work truck, likely to alert passersby and ensure the safety of both the worker and the public." + }, + { + "from": "human", + "value": "Can you describe the setting in which the work truck is parked?" + }, + { + "from": "gpt", + "value": "The work truck is parked on a city street next to two brick houses, indicating that the scene takes place within a residential neighborhood." 
+ } + ] + }, + { + "id": "000000120399", + "image": "000000120399.jpg", + "conversations": [ + { + "from": "human", + "value": "What color are the furniture and walls in the room?\n" + }, + { + "from": "gpt", + "value": "The furniture and walls in the room are all white." + }, + { + "from": "human", + "value": "What type of room is it, and what furniture pieces are in the room?" + }, + { + "from": "gpt", + "value": "The room depicted in the image is a bedroom. The furniture pieces in the room include a bed, night table, lounge chair, ottoman, and a rug." + }, + { + "from": "human", + "value": "Does the bedroom have a minimalist design?" + }, + { + "from": "gpt", + "value": "Yes, the bedroom features a minimalist design with all-white color scheme and minimal furniture pieces." + }, + { + "from": "human", + "value": "What is the purpose of an ottoman in a bedroom setting?" + }, + { + "from": "gpt", + "value": "An ottoman in a bedroom setting serves various purposes. It can be used as a place to rest one's feet when seated on the lounge chair, providing extra comfort and relaxation. Ottomans may also be used as additional seating options for guests, or even as an impromptu side table to hold items such as books, a tray with a drink or snacks, or a decorative element like a throw blanket. The versatile nature of ottomans makes them a practical and stylish choice for bedroom design." + } + ] + }, + { + "id": "000000211989", + "image": "000000211989.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many men are in the image?" + }, + { + "from": "gpt", + "value": "There are two men in the image." + }, + { + "from": "human", + "value": "Where are the men standing?" + }, + { + "from": "gpt", + "value": "The men are standing near a boat dock or marina, close to the water." + }, + { + "from": "human", + "value": "Do the men seem to be posing for a picture?" + }, + { + "from": "gpt", + "value": "Yes, the men seem to be posing for a picture next to the boat dock." + }, + { + "from": "human", + "value": "What is the expression on the men's faces?" + }, + { + "from": "gpt", + "value": "The men are smiling, indicating that they are enjoying their time near the boat dock." + }, + { + "from": "human", + "value": "What are some activities people might do at a boat dock or marina?" + }, + { + "from": "gpt", + "value": "At a boat dock or marina, people can engage in various activities, including but not limited to:\n\n1. Boating: Taking boats, yachts, or other watercraft out for recreational or fishing trips.\n2. Fishing: Enjoying a relaxing day of fishing from the dock or while on a boat.\n3. Sightseeing: Admiring the surrounding scenery, boats, and marine life.\n4. Photography: Capturing pictures of the boats, water, nearby wildlife, or posing with friends and family, as shown in the image.\n5. Socializing: Meeting friends, participating in waterside events, or chatting with fellow boating enthusiasts.\n6. Water sports: Participating in sports like kayaking, paddleboarding, jet-skiing, or joining others for a day of sailing or wakeboarding. \n\nThese activities vary depending on the specific amenities and features of the dock or marina, as well as the interests or preferences of the visitors, but overall, they offer a range of fun, relaxing, and engaging experiences for people to enjoy by the water." + } + ] + }, + { + "id": "000000288397", + "image": "000000288397.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat type of bus is featured in the image?" 
+ }, + { + "from": "gpt", + "value": "The image features a double decker bus, which has two levels for passengers to sit or stand." + }, + { + "from": "human", + "value": "What is the position of the double decker bus in the image?" + }, + { + "from": "gpt", + "value": "The double decker bus is parked or stopped at the side of a street, next to a building." + }, + { + "from": "human", + "value": "What are the surroundings of the bus in the image?" + }, + { + "from": "gpt", + "value": "The bus is surrounded by tall buildings, and one of them appears to be a multi-family housing building." + }, + { + "from": "human", + "value": "What colors are featured on the double decker bus?" + }, + { + "from": "gpt", + "value": "The double decker bus has a red and yellow color scheme." + }, + { + "from": "human", + "value": "What is the purpose of a double decker bus and what are the advantages of using one?" + }, + { + "from": "gpt", + "value": "The main purpose of a double decker bus is to transport a larger number of passengers while using the same amount of road space as a single-deck bus, making it an efficient public transportation vehicle. By having two levels for passengers to sit or stand, it can carry more people and reduce the number of vehicles needed to serve a given route, which can help alleviate traffic congestion, lower operational costs, and reduce emissions.\n\nSome advantages of using double decker buses include:\n1. Increased capacity: A double decker bus can accommodate significantly more passengers compared to a single-deck bus, which is particularly beneficial in cities with high public transportation demands.\n2. Enhanced and unobstructed views: The upper deck offers passengers a higher vantage point, providing better views of the surroundings and making the journey more enjoyable, especially in cities with interesting landscapes or architectures.\n3. Accessibility: Double decker buses often feature either low-floor designs or accessible ramps, making it easier for passengers with mobility impairments or those using wheelchairs or strollers to board and exit the bus.\n4. Efficient use of road space: By transporting more passengers within the same vehicle footprint, double decker buses help optimize road usage and overall transportation network efficiency.\n5. Reduced environmental impact: Fewer buses on the roads due to increased passenger capacity means lower overall emissions, decreased fuel consumption, and a smaller environmental impact.\n\nIn summary, a double decker bus serves as an efficient form of public transportation, offering the advantages of increased capacity, better views for passengers, accessibility, more optimal use of road space, and reduced environmental impact." + } + ] + }, + { + "id": "000000307803", + "image": "000000307803.jpg", + "conversations": [ + { + "from": "human", + "value": "Is there a baseball game happening in the image?\n" + }, + { + "from": "gpt", + "value": "Yes, there is a baseball game happening in the image, with a batter swinging a bat on a baseball field." + }, + { + "from": "human", + "value": "What position are the batter and the catcher in the image?" + }, + { + "from": "gpt", + "value": "The batter is on the batting mark, ready to hit the incoming ball, while the catcher is positioned behind the batter at the home plate." + }, + { + "from": "human", + "value": "Is there an umpire present in the image? If so, what is the umpire doing?" 
+ }, + { + "from": "gpt", + "value": "Yes, an umpire is present in the image, and they are closely observing the game as the batter swings at the incoming ball. The umpire is holding their glove out, prepared to make any necessary calls for the game." + }, + { + "from": "human", + "value": "Can you explain the role of each person in the image during a baseball game?" + }, + { + "from": "gpt", + "value": "In the image, we see three key participants in a baseball game: the batter, the catcher, and the umpire. Each person plays a distinct role in the game.\n\n1. The Batter: The batter's primary goal is to hit the incoming ball thrown by the opposing team's pitcher. Batters want to make solid contact and hit the ball into the field to hopefully reach a base, drive in runs, or possibly hit a home run. They also pay attention to the pitcher's throws and aim to make the right decisions on whether to swing or not, based on balls and strikes.\n\n2. The Catcher: The catcher, positioned behind the home plate, is responsible for receiving the pitches thrown by their team's pitcher. Catchers play a crucial role in guiding the pitcher in terms of pitch selection, and they need to read the batters' tendencies accurately. Additionally, catchers block pitches to prevent wild pitches, passed balls, and additional advancements by base-runners. They also play a role in throwing out potential base stealers and are involved in various defensive plays.\n\n3. The Umpire: The umpire is the impartial arbitrator who enforces the rules of the baseball game. Positioned behind the catcher, the umpire observes the game closely and makes essential decisions, such as determining if a pitch is a ball or a strike, whether a batter's swing makes contact, and if a runner is safe or out. Umpires also oversee other aspects of the game, such as monitoring substitutions, inspecting the field's conditions, and enforcing any rule infractions while keeping the game fair and organized.\n\nOverall, each participant in the image has a specific role to play, which ultimately contributes to the successful unfolding of a baseball game." + } + ] + }, + { + "id": "000000004245", + "image": "000000004245.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the condition of the stop sign in the image?\n" + }, + { + "from": "gpt", + "value": "The stop sign in the image is faded and appears to be vandalized, with graffiti or splashed red substance visible on the bottom part of it." + }, + { + "from": "human", + "value": "Where is the stop sign located?" + }, + { + "from": "gpt", + "value": "The stop sign is located on the side of a street in a residential area, near an intersection." + }, + { + "from": "human", + "value": "Does the image show any other objects or infrastructure near the stop sign?" + }, + { + "from": "gpt", + "value": "The image does not provide enough information about other objects or infrastructure near the stop sign. It mainly focuses on the faded and vandalized stop sign in the residential area." + }, + { + "from": "human", + "value": "What are the consequences of having a faded and vandalized stop sign in the neighborhood?" + }, + { + "from": "gpt", + "value": "Having a faded and vandalized stop sign in the neighborhood can lead to a variety of consequences, some of which include:\n\n1. Safety concerns: Faded and vandalized stop signs can be challenging to see or read, particularly in low-light conditions or during inclement weather. 
This can result in drivers potentially not noticing the sign and failing to stop appropriately at the intersection. Such situations may lead to an increased risk of accidents or collisions involving pedestrians, cyclists, and other motorists.\n\n2. Aesthetics and property value: Faded and vandalized signs can contribute to making the neighborhood look unkempt or neglected. This can potentially lower property values, as it may create an impression of an uncared-for community or an area plagued with vandalism and crime. \n\n3. Negative perception of the community: The presence of graffiti on stop signs might be associated with gangs or antisocial behavior, which can create a negative image of the community. This may lead to increased anxiety or fear among residents, especially if they perceive the area as dangerous or crime-infested.\n\nTo remedy these issues, it is essential for local authorities or neighborhood associations to address the problem by either cleaning or replacing the vandalized stop signs promptly. This helps to ensure road safety and maintain a positive and clean environment, while possibly deterring further acts of vandalism." + } + ] + }, + { + "id": "000000396186", + "image": "000000396186.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat sport is being played in the image?" + }, + { + "from": "gpt", + "value": "The sport being played in the image is tennis." + }, + { + "from": "human", + "value": "What is the gender of the tennis player?" + }, + { + "from": "gpt", + "value": "The tennis player is female." + }, + { + "from": "human", + "value": "What is the tennis player wearing in the image?" + }, + { + "from": "gpt", + "value": "The female tennis player is wearing a green skirt and a white top." + }, + { + "from": "human", + "value": "What action is the tennis player performing in the image?" + }, + { + "from": "gpt", + "value": "The tennis player is swinging her racket at a ball, about to hit it during the game." + }, + { + "from": "human", + "value": "What are the essential skills needed to excel in tennis, based on this image?" + }, + { + "from": "gpt", + "value": "To excel in tennis, several essential skills are needed, and some of these can be inferred from the image of the female tennis player in action. These skills include:\n\n1. Proper technique: It is crucial to learn and execute the correct techniques for various shots like forehand, backhand, serve, and volley. The tennis player's swing in the image demonstrates the importance of technique in successfully hitting the ball and aiming it towards the desired direction on the court.\n\n2. Footwork: Good footwork is vital in tennis, as it enables the player to move quickly and efficiently around the court to reach the ball. The female player's position on the court in the image suggests that she has moved to meet the ball and prepare for a well-executed shot.\n\n3. Timing: Hitting the tennis ball at the optimal moment is crucial for accuracy, power, and control. The image captures the player just about to hit the ball, indicating the importance of timing when swinging the racket.\n\n4. Strength and endurance: Tennis requires physical strength to generate power in shots and stamina to maintain a high energy level throughout the match. The female player's swing and stance imply that she has built up the necessary strength and endurance to compete effectively in the sport.\n\n5. Strategy and decision-making: Tennis involves devising a smart game plan and making split-second decisions during play. 
The image does not show this directly, but it can be assumed that the player has a strategy in mind when executing her shots and reacting to her opponent's moves.\n\nIn summary, to excel in tennis, a player should focus on mastering proper techniques, improving footwork, refining timing, building strength and endurance, and developing strategic thinking and decision-making skills. These aspects can help a player perform at optimum levels and succeed in the competitive world of tennis." + } + ] + }, + { + "id": "000000401705", + "image": "000000401705.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the color of the cat in the image?" + }, + { + "from": "gpt", + "value": "The color of the cat in the image is orange, as it is an orange tabby cat." + }, + { + "from": "human", + "value": "What is the cat doing in the image?" + }, + { + "from": "gpt", + "value": "The cat is sleeping, curled up inside of a bowl in the image." + }, + { + "from": "human", + "value": "Where is the bowl with the cat located?" + }, + { + "from": "gpt", + "value": "The bowl with the cat is located on a table." + }, + { + "from": "human", + "value": "Is there any other object on the table besides the bowl with the cat?" + }, + { + "from": "gpt", + "value": "Yes, there is a yellow plate on the table as well, but the cat is lying in a separate bowl." + }, + { + "from": "human", + "value": "What can we learn about cats' behavior from this image?" + }, + { + "from": "gpt", + "value": "This image showcases a common behavior among cats, which is their tendency to curl up and sleep in small, enclosed spaces, like a bowl in this case. Cats are known to enjoy the sense of security and warmth that snug resting spots provide them. These spaces allow them to feel protected from potential threats and help them save energy by reducing heat loss during sleep. \n\nCats often sleep for an average of 12 to 16 hours a day, and they have a unique ability to adapt their sleeping habits according to their environment. For instance, in this image, the orange tabby cat has chosen a bowl on a table as its resting place, demonstrating its flexibility in finding suitable sleep locations. This behavior also illustrates cats' innate curiosity and creativity in seeking out unconventional yet cozy spots to take naps. \n\nMoreover, the image of a cat sleeping in a bowl on a table can evoke feelings of warmth, cuteness, and relaxation, which makes it a captivating and endearing sight for people who appreciate the behavior of cats." + } + ] + }, + { + "id": "000000038905", + "image": "000000038905.jpg", + "conversations": [ + { + "from": "human", + "value": "Is the bus in the photo parked or in motion?\n" + }, + { + "from": "gpt", + "value": "The bus in the photo is in motion, traveling down an asphalt road next to a sidewalk." + }, + { + "from": "human", + "value": "What type of neighborhood is the bus traveling through?" + }, + { + "from": "gpt", + "value": "The bus is traveling through a residential section of town." + }, + { + "from": "human", + "value": "Is the street busy or empty?" + }, + { + "from": "gpt", + "value": "The street is empty in the image, with the bus as the primary focus." + }, + { + "from": "human", + "value": "Does the photo show the bus leaving the residential area and crossing a viaduct?" + }, + { + "from": "gpt", + "value": "Yes, the photo shows the bus leaving the residential section of town and traveling across a viaduct." 
+ }, + { + "from": "human", + "value": "Is the bus in the photo an old-fashioned or modern style?" + }, + { + "from": "gpt", + "value": "The bus in the photo is an old-fashioned style, which suggests the image might be an old photo." + } + ] + }, + { + "id": "000000070927", + "image": "000000070927.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many different types of animals are there in the image?" + }, + { + "from": "gpt", + "value": "There are two different types of animals in the image: giraffes and zebras." + }, + { + "from": "human", + "value": "Are the giraffes and zebras in the wild or in an enclosed area?" + }, + { + "from": "gpt", + "value": "The giraffes and zebras are in an enclosed area, which could be a pen, an outdoor enclosure, or a field with a fence." + }, + { + "from": "human", + "value": "What are the giraffes and zebras doing in the image?" + }, + { + "from": "gpt", + "value": "The giraffes and zebras are grazing in the field, likely feeding on grass or other vegetation." + }, + { + "from": "human", + "value": "Can you describe the surrounding environment where the giraffes and zebras are located?" + }, + { + "from": "gpt", + "value": "The surrounding environment is an enclosed field or pen, with trees, rocks, and possibly a fence or barrier. This setting provides a natural-like habitat for the animals, allowing them to feel more comfortable while still ensuring their safety and containment." + } + ] + }, + { + "id": "000000372726", + "image": "000000372726.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the color of the sheets on the bed?" + }, + { + "from": "gpt", + "value": "The color of the sheets on the bed is brown." + }, + { + "from": "human", + "value": "How many dressers or nightstands are in the bedroom?" + }, + { + "from": "gpt", + "value": "There are two small dressers or nightstands in the bedroom, one on either side of the bed." + }, + { + "from": "human", + "value": "What is the condition of the bedspread and pillows on the bed?" + }, + { + "from": "gpt", + "value": "The bedspread is turned down and two white pillows are exposed, contributing to the neat and tidy appearance of the bed." + }, + { + "from": "human", + "value": "What type of floor does the bedroom have?" + }, + { + "from": "gpt", + "value": "The bedroom has a tile floor." + }, + { + "from": "human", + "value": "Is there any unique decoration on the bed?" + }, + { + "from": "gpt", + "value": "Yes, there is an origami towel adorning the bed, adding a unique and artistic touch to the room." + } + ] + }, + { + "id": "000000066124", + "image": "000000066124.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhere are people flying the kites?" + }, + { + "from": "gpt", + "value": "People are flying the kites by the shore on the beach in an area that appears to be a field." + }, + { + "from": "human", + "value": "How many kites are visible in the image, and can you describe their appearance?" + }, + { + "from": "gpt", + "value": "There are a large number of kites visible in the image, which are colorful and flying everywhere in the sky." + }, + { + "from": "human", + "value": "How many people can be seen flying the kites?" + }, + { + "from": "gpt", + "value": "The image shows a couple of guys flying the kites, but there could be more people involved who are not visible in the photo." + }, + { + "from": "human", + "value": "Can you describe the benefits of flying kites as a recreational activity?" 
+ }, + { + "from": "gpt", + "value": "Kite flying is a popular recreational activity with various physical, mental, and social benefits. Here are some of the advantages associated with flying kites:\n\n1. Physical Benefits:\n- Improves hand-eye coordination: Kite flying requires control and precision, which helps improve hand-eye coordination and fine motor skills.\n- Increases physical activity: As kite fliers run and move to maintain the kites in the air, flying kites can be a form of exercise, especially when doing it on a beach or open field where there's ample space to move around.\n- Boosts cardiovascular health: The physical activity involved in kite flying can lead to increased heart rate, which promotes better circulation and cardiovascular health.\n\n2. Mental Benefits:\n- Reduces stress: The process of controlling a kite in the sky can be a relaxing and mindful activity, helping to alleviate stress and anxiety by focusing on the present moment.\n- Enhances concentration: Kite flying requires continued focus and concentration to keep the kite aloft and stable in the wind. This practice can improve attention span and mental discipline.\n- Stimulates creativity: Kite flying lends itself to creativity, with opportunities to experiment with different types of kites, designs, and flying techniques for a unique experience.\n\n3. Social Benefits:\n- Promotes social interaction: Group kite flying, like the scene depicted in the image, can provide an opportunity for people to connect, interact, and develop friendships with others who share this interest.\n- Encourages outdoor play: Kite flying is an outdoor activity that encourages people of all ages to spend time outside, enjoy the fresh air, and connect with nature.\n- Caters to all ages: Kite flying is a versatile activity that can be enjoyed by people of all ages, making it a popular choice for family outings, community events, or friendly gatherings.\n\nIn summary, kite flying offers a range of physical, mental, and social benefits, making it a well-rounded recreational activity suitable for various people and contexts. By flying kites, individuals can improve their hand-eye coordination, concentration, cardiovascular health, creativity, social interactions, and overall well-being while enjoying the outdoors." + } + ] + }, + { + "id": "000000015404", + "image": "000000015404.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the main feature of the sky in the image?\n" + }, + { + "from": "gpt", + "value": "The main feature of the sky in the image is the presence of large, gray storm clouds, which make the sky appear dark and threatening." + }, + { + "from": "human", + "value": "What is the condition of the hillside in the image?" + }, + { + "from": "gpt", + "value": "The hillside in the image is lush and green, suggesting a healthy and vibrant landscape." + }, + { + "from": "human", + "value": "Is there a road visible in this image?" + }, + { + "from": "gpt", + "value": "Yes, there is a paved road visible in the image, which runs through the grassy hill." + }, + { + "from": "human", + "value": "What kind of weather is indicated by the presence of storm clouds in the image?" + }, + { + "from": "gpt", + "value": "The presence of storm clouds in the image indicates an incoming storm or a change in weather. These clouds bring rain, which can make the landscape greener and more vibrant, but can also lead to potential challenges like flooding or slippery roads. 
The overall atmosphere of the image suggests an impending shift in weather conditions, with the approaching storm clouds making the sky darker." + }, + { + "from": "human", + "value": "How can storm clouds impact the surrounding environment, and what do they imply about the climate in the area?" + }, + { + "from": "gpt", + "value": "Storm clouds can have both positive and negative impacts on the surrounding environment. On the positive side, they bring much-needed rainfall, which nourishes the soil, promotes plant growth, and replenishes water sources such as rivers, lakes, and groundwater reserves. In the image, the lush green hillside suggests that the area might benefit from regular rainfall, contributing to its healthy and vibrant landscape.\n\nOn the negative side, storm clouds can cause severe weather, which may lead to various challenges and disruptions. Heavy rainfall may result in flooding, making roads impassable, causing property damage, and posing risks to human life and wildlife. Storms can also bring strong winds, thunder, and lightning, which can cause power outages, damage to infrastructure or vegetation, and even start wildfires in some cases.\n\nThe presence of storm clouds in the area implies that the climate may be characterized by periods of rainfall or a mix of sunny and rainy days, which contribute to the region's lush landscape. However, the potential for severe weather suggests the need for caution, preparedness, and planning to address any weather-related challenges or emergencies that may arise." + } + ] + }, + { + "id": "000000335111", + "image": "000000335111.jpg", + "conversations": [ + { + "from": "human", + "value": "How many bikes are laying on the sidewalk in the image?\n" + }, + { + "from": "gpt", + "value": "In the image, there are 2 bikes laying on their sides on the sidewalk." + }, + { + "from": "human", + "value": "Is there someone standing near the row of bicycles?" + }, + { + "from": "gpt", + "value": "Yes, there is a woman standing in front of the row of bicycles. She is wearing a black coat with a hood and is standing near a bus stop." + }, + { + "from": "human", + "value": "Are the bicycles located near a particular landmark or facility?" + }, + { + "from": "gpt", + "value": "The bicycles are located near a bus stop, indicating a connection between different modes of transportation accessible in the area." + }, + { + "from": "human", + "value": "What possible reasons might there be for the bikes to be laying on their sides?" + }, + { + "from": "gpt", + "value": "There could be several possible reasons for the bikes laying on their sides:\n\n1. Accidental Knock-over: Someone may have accidentally knocked the bikes over, either due to carelessness or while trying to make space on a crowded sidewalk. In such cases, a passerby or the bike owner might have bumped into the bikes and caused them to fall.\n\n2. Vandalism or Prank: Some individuals may intentionally knock over bicycles as an act of mischief, vandalism or playing a prank. This disrespectful behavior can create inconveniences for the bike owners and others who need to use the sidewalk.\n\n3. Poor Maintenance or Bike Stand: Sometimes, bikes can fall over due to issues related to bike stands or maintenance. A broken or unstable bike stand may not provide adequate support, causing the bikes to topple over. Additionally, if the bikes are not well-maintained, parts such as the tire pressure, loose brakes, or an uneven center of gravity can lead to a bike falling over when parked.\n\n4. 
Weather Conditions: Strong winds, heavy rain, or other severe weather conditions can cause bikes to fall over, especially if they are not secured properly.\n\nThough it's challenging to pinpoint the exact reason without more information about the situation, these are some possibilities that could have caused the bikes to be laying on their sides in the image." + } + ] + }, + { + "id": "000000236740", + "image": "000000236740.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the color of the teddy bear in the image?\n" + }, + { + "from": "gpt", + "value": "The color of the teddy bear in the image is brown." + }, + { + "from": "human", + "value": "What is the teddy bear wearing?" + }, + { + "from": "gpt", + "value": "The teddy bear is wearing a red bow." + }, + { + "from": "human", + "value": "Where is the teddy bear positioned in the image?" + }, + { + "from": "gpt", + "value": "The teddy bear is sitting on a bed against the headboard." + }, + { + "from": "human", + "value": "Is the teddy bear described as cute or old?" + }, + { + "from": "gpt", + "value": "The teddy bear is described as both cute and old, which can indicate that it has been cherished and well-loved over the years." + }, + { + "from": "human", + "value": "What is another possible location where the teddy bear might be placed?" + }, + { + "from": "gpt", + "value": "Another possible location for the teddy bear could be sitting on a chair leaning against the side, or sitting up against a stair railing. These alternate locations show that the teddy bear can be positioned in various places within a home." + } + ] + }, + { + "id": "000000182020", + "image": "000000182020.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the cat resting on in the image?\n" + }, + { + "from": "gpt", + "value": "The cat is resting on top of a laptop and a sweater." + }, + { + "from": "human", + "value": "Where is the cat located?" + }, + { + "from": "gpt", + "value": "The cat is located on a desk." + }, + { + "from": "human", + "value": "What is the cat doing?" + }, + { + "from": "gpt", + "value": "The cat is laying down, snuggling up to the sweater." + }, + { + "from": "human", + "value": "Is there a folded blanket next to the cat?" + }, + { + "from": "gpt", + "value": "Yes, there is a folded blanket next to the cat, sitting on the desk." + }, + { + "from": "human", + "value": "Do cats typically enjoy laying on warm objects, and if so, why?" + }, + { + "from": "gpt", + "value": "Yes, cats typically enjoy laying on warm objects. There are several reasons for this behavior:\n\n1. Seeking warmth: Cats, like most animals, are drawn to warmth. Their body temperature is slightly higher than that of humans, so they might feel more comfortable when they are warmer. Laying on warm objects, such as a laptop or sunlit spot, allows them to conserve energy and stay cozy.\n\n2. Safety and comfort: Warm and soft surfaces often provide a sense of security and comfort for cats. Laying on warm objects like blankets, sweaters, or laptops can make cats feel more secure as these objects often retain some of the owner's scent, providing a familiar and reassuring environment for the cat.\n\n3. Heat regulation: Cats are better equipped to handle higher temperatures than cold, so they seek warmth to help regulate their body temperature. Laying on warm surfaces can help them stay at a comfortable temperature, especially in colder months or in air-conditioned environments.\n\n4. 
Attention: Cats may also lay on warm objects, such as laptops, to gain their owner's attention. If a cat notices that their owner spends a significant amount of time on the laptop, they may choose to lay on it to gain some attention or affection.\n\nOverall, cats love laying on warm objects for various reasons, including seeking warmth, feeling secure, and regulating their body temperature. It can also be a way for them to receive attention from their owners." + } + ] + }, + { + "id": "000000523007", + "image": "000000523007.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many tennis players are in the image?" + }, + { + "from": "gpt", + "value": "There are two tennis players in the image." + }, + { + "from": "human", + "value": "Are the tennis players shaking hands with their opponents or just standing on their side of the net?" + }, + { + "from": "gpt", + "value": "The tennis players are shaking hands with their opponents by extending their arms and reaching across the net." + }, + { + "from": "human", + "value": "What are the tennis players holding in their hands?" + }, + { + "from": "gpt", + "value": "The tennis players are holding racquets in their hands." + }, + { + "from": "human", + "value": "What does the handshaking gesture suggest about the tennis match?" + }, + { + "from": "gpt", + "value": "The handshaking gesture between the tennis players suggests that the match has just ended, and they are showing sportsmanship and respect towards their opponents. It is a traditional etiquette followed in tennis and many other sports as a symbol of fair competition and acknowledgement of the players' efforts. The handshake typically occurs after a match is concluded, and it helps reinforce the spirit of camaraderie and goodwill between competitors. It signifies the importance of maintaining a positive attitude, acknowledging each other's skills, and fostering a sense of mutual respect on the court." + } + ] + }, + { + "id": "000000434813", + "image": "000000434813.jpg", + "conversations": [ + { + "from": "human", + "value": "What is located next to the street sign in the image?\n" + }, + { + "from": "gpt", + "value": "In the image, the street sign is sitting next to a container labeled \"salt.\"" + }, + { + "from": "human", + "value": "Is there a bench near the street sign as well?" + }, + { + "from": "gpt", + "value": "Yes, there is a bench nearby the street sign, probably for people to sit down and rest." + }, + { + "from": "human", + "value": "What is the overall setting of the image? Does it include grassy areas or bushes?" + }, + { + "from": "gpt", + "value": "The image includes a road near a grassy area with a bush. This contributes to the overall setting, which seems to be a public space with streets, signs, and seating areas." + }, + { + "from": "human", + "value": "What is the purpose of the salt container next to the street sign?" + }, + { + "from": "gpt", + "value": "The purpose of the salt container next to the street sign is likely for maintenance and safety reasons. During winter months, when roads and sidewalks become icy due to freezing temperatures and snowfall, salt is commonly used to melt the ice and provide traction for vehicles and pedestrians. The salt container is placed nearby the street sign so it can be easily accessed by maintenance workers or even residents for their safety in icy conditions." + }, + { + "from": "human", + "value": "Is there a dog house visible in this image? If not, what could have been mistaken for it?" 
+ }, + { + "from": "gpt", + "value": "There is no dog house visible in the image. It is possible that the container labeled \"salt\" was initially mistaken for a dog house due to its similar size or shape. However, the container's label confirms that it is meant to store salt and is not a dog house." + } + ] + }, + { + "id": "000000254948", + "image": "000000254948.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the man doing in the image?\n" + }, + { + "from": "gpt", + "value": "The man in the image is standing on a lush green field with a soccer ball, getting ready to kick it. He is running toward the soccer ball in a fenced-in enclosure, dressed in a black uniform or track suit." + }, + { + "from": "human", + "value": "Is the man a soccer player?" + }, + { + "from": "gpt", + "value": "Yes, the man is a soccer player, as evidenced by his actions and attire in the image." + }, + { + "from": "human", + "value": "What type of clothing is the man wearing?" + }, + { + "from": "gpt", + "value": "The man is either wearing a black uniform or a track suit, indicating that he is an athlete or soccer player involved in physical activity." + }, + { + "from": "human", + "value": "What is the setting of the image?" + }, + { + "from": "gpt", + "value": "The setting of the image is a lush green field within a fenced-in enclosure, which is likely a soccer field or a sports facility designed for soccer or similar outdoor activities." + }, + { + "from": "human", + "value": "How can soccer help with a person's physical and mental well-being?" + }, + { + "from": "gpt", + "value": "Soccer is a physically demanding sport that offers numerous benefits to an individual's physical and mental well-being.\n\n1. Physical Benefits:\n- Cardiovascular Health: Soccer involves constant running and aerobic exercises, which help improve heart health and overall cardiovascular fitness.\n- Muscular Strength and Endurance: Soccer players engage in various physical movements, such as running, jumping, and kicking, that help to strengthen muscles and increase their endurance.\n- Flexibility and Coordination: The sport requires agility, flexibility, and coordination as players move, change directions, and maintain control of the ball during matches.\n- Burns Calories and Fat: Soccer is an excellent way to burn calories and maintain a healthy body weight due to its high-intensity nature.\n\n2. Mental Benefits:\n- Stress Relief: Playing soccer offers a healthy outlet for releasing stress and tension, leading to improved mental well-being.\n- Social Interaction: Soccer is a team sport that requires communication, cooperation, and camaraderie, promoting social interaction and allowing players to form strong bonds with teammates.\n- Builds Confidence and Self-Esteem: Accomplishing goals, improving skills, and contributing to team victories helps boost a player's sense of self-worth and confidence in their abilities.\n- Teaches Discipline and Perseverance: As soccer players develop their skills and face various challenges on the field, they learn the importance of discipline, perseverance, and maintaining a positive attitude.\n\nIn summary, soccer is an excellent sport for promoting physical and mental well-being. It offers a wide range of physical benefits related to cardiovascular health, muscular strength, flexibility, and weight management. 
Furthermore, soccer helps to relieve stress, foster social interaction, and teach valuable life skills, such as discipline and perseverance, which contribute to overall psychological health and personal growth." + } + ] + }, + { + "id": "000000405361", + "image": "000000405361.jpg", + "conversations": [ + { + "from": "human", + "value": "Are the people in the image waiting to cross the street or just standing together?\n" + }, + { + "from": "gpt", + "value": "The people in the image are waiting to cross the street at a crosswalk, standing together on the sidewalk near the edge of a busy street." + }, + { + "from": "human", + "value": "Is there a traffic light present in the image, and if so, what is its current status?" + }, + { + "from": "gpt", + "value": "Yes, there is a traffic light present in the image, and the people are waiting for the light to change, which suggests that it is currently red for pedestrians." + }, + { + "from": "human", + "value": "What does the presence of umbrellas indicate about the weather in the image?" + }, + { + "from": "gpt", + "value": "The presence of umbrellas indicates that it might be raining or drizzling, as people are using them to keep dry while waiting to cross the street." + }, + { + "from": "human", + "value": "Are there any cars visible in the image?" + }, + { + "from": "gpt", + "value": "Yes, there are passing cars visible on the busy street next to the group of people waiting on the sidewalk." + } + ] + }, + { + "id": "000000480023", + "image": "000000480023.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the hand holding in the image?" + }, + { + "from": "gpt", + "value": "The hand in the image is holding a hot dog." + }, + { + "from": "human", + "value": "What toppings are on the hot dog?" + }, + { + "from": "gpt", + "value": "The hot dog is covered in mustard, ketchup, and onions." + }, + { + "from": "human", + "value": "Where is the person holding the hot dog?" + }, + { + "from": "gpt", + "value": "The person holding the hot dog is at a sports stadium or a ballpark." + }, + { + "from": "human", + "value": "What is the hot dog served in?" + }, + { + "from": "gpt", + "value": "The hot dog is served in a paper container." + }, + { + "from": "human", + "value": "Is the person showing off the hot dog or is it a close-up shot?" + }, + { + "from": "gpt", + "value": "The person is holding the hot dog close to the camera, showcasing it in a close-up shot." + } + ] + }, + { + "id": "000000015906", + "image": "000000015906.jpg", + "conversations": [ + { + "from": "human", + "value": "What color is the chair in the image?\n" + }, + { + "from": "gpt", + "value": "The chair in the image is black." + }, + { + "from": "human", + "value": "What is the chair positioned under in the image?" + }, + { + "from": "gpt", + "value": "The chair is positioned under a clock on the wall." + }, + { + "from": "human", + "value": "What type of chair is visible in the image?" + }, + { + "from": "gpt", + "value": "The visible chair is a leather chair, suggesting it's comfortable and likely placed in a cozy space." + }, + { + "from": "human", + "value": "Is there anything placed on the chair, such as a book or other items?" + }, + { + "from": "gpt", + "value": "Yes, there is a book placed on the armrest of the black leather chair. This could indicate that someone was recently sitting there reading, or it's a convenient spot for keeping their current read." 
+ }, + { + "from": "human", + "value": "What does the presence of the leather chair and clock on the wall convey about the space or room?" + }, + { + "from": "gpt", + "value": "The presence of the black leather chair and the clock on the wall behind it conveys a sense of coziness, relaxation, and a thoughtful atmosphere. The chair, being comfortable and inviting, suggests that the space is designed for people to unwind, read or even engage in conversations. The clock, on the other hand, provides a functional element to the room, allowing inhabitants to keep track of time when engaged in activities or conversations.\n\nCombining these elements, it can be inferred that the space was created with a particular focus on providing an area where relaxation, quiet contemplation, or intellectual pursuits could take place. This environment could be particularly suitable for someone who values their leisure time, enjoys reading, or appreciates a quieter, more thoughtful setting away from the hustle and bustle of daily life." + } + ] + }, + { + "id": "000000009951", + "image": "000000009951.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the main subject of the image?\n" + }, + { + "from": "gpt", + "value": "The main subject of the image is a young man riding a skateboard." + }, + { + "from": "human", + "value": "Where is the skateboarder located in the image?" + }, + { + "from": "gpt", + "value": "The skateboarder is located on the street, riding on the sidewalk and passing by a storefront, with a tall building in the background." + }, + { + "from": "human", + "value": "What kind of clothing and footwear is the skateboarder wearing?" + }, + { + "from": "gpt", + "value": "The skateboarder is wearing jeans, rubber shoes, and skateboard gear." + }, + { + "from": "human", + "value": "Is the skateboarder performing any specific tricks, or is he simply traveling on the skateboard?" + }, + { + "from": "gpt", + "value": "The skateboarder appears to be traveling on the skateboard rather than performing any specific tricks, as the image shows him turning a corner at high speeds and skating down the street." + } + ] + }, + { + "id": "000000468428", + "image": "000000468428.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat object is the woman holding at the beach?" + }, + { + "from": "gpt", + "value": "The woman is holding a surfboard at the beach." + }, + { + "from": "human", + "value": "What is the color of the surfboard the woman is carrying?" + }, + { + "from": "gpt", + "value": "The surfboard the woman is carrying is green." + }, + { + "from": "human", + "value": "What is the woman's intended activity, as suggested by the image?" + }, + { + "from": "gpt", + "value": "As the woman is carrying a surfboard, her intended activity is most likely surfing, either preparing to go into the water or exiting after a surfing session." + }, + { + "from": "human", + "value": "What are some of the basic techniques for beginners who want to start surfing?" + }, + { + "from": "gpt", + "value": "For beginners wanting to start surfing, there are some basic techniques and steps to follow:\n\n1. Choosing the right equipment: Beginners should start with a soft-top surfboard that is larger and more stable, making it easier to balance on and catch waves. Wearing a leash connected to the surfboard is essential for safety, as it prevents the board from being separated from the surfer in the water.\n\n2. 
Learning to paddle: Lying prone on the surfboard, the surfer should practice paddling by alternately reaching forward with their arms and pulling through the water. This technique helps propel the surfer in the water and catch waves.\n\n3. Practicing the pop-up: To stand up on the surfboard, the surfer needs to quickly transition from lying down to standing in a controlled, fluid motion, known as the pop-up. The surfer should place their hands near their chest and push themselves up, bringing their feet beneath them to achieve a balanced stance on the board.\n\n4. Stance and balance: Once standing, the surfer should adopt a low, slightly crouched stance, bending their knees and keeping their center of gravity low. The surfer should also try to maintain a relaxed posture, with their weight positioned evenly and arms used for balance.\n\n5. Reading and catching waves: Learning how to read the ocean and catch waves is a crucial aspect of surfing. Beginners should practice in small, gentle waves and focus on understanding how to position themselves correctly to catch a wave. Gradual progression to larger and more challenging waves can be made as the surfer gains experience and confidence.\n\n6. Respecting surf etiquette: It is important for surfers to respect the rules and etiquette of the surf spot, such as giving right of way to other surfers that are already on a wave and not \"dropping in\" on someone else's ride. This helps to ensure a safe and enjoyable experience for all surfers in the water.\n\nTaking surfing lessons from experienced surfers or certified surf instructors can significantly improve the learning process, as well as enhance safety and enjoyment on the water. It is always advisable to practice with friends or in the presence of other surfers to ensure you have help if needed. Safety should always be a priority, including checking weather and water conditions and knowing how to handle rip currents and other ocean hazards." + } + ] + }, + { + "id": "000000296675", + "image": "000000296675.jpg", + "conversations": [ + { + "from": "human", + "value": "What is happening in this baseball game?\n" + }, + { + "from": "gpt", + "value": "In the image, a batter is getting ready to hit the ball during a professional baseball game. The catcher and umpire are both waiting behind the batter at home base, poised for action, while spectators watch the game." + }, + { + "from": "human", + "value": "What are the roles of the umpire, batter, and catcher in this scene?" + }, + { + "from": "gpt", + "value": "In this scene, the umpire, batter, and catcher all play crucial roles in the baseball game. \n\nThe umpire is responsible for making decisions on various aspects of the game, such as calling balls, strikes, and outs. They ensure that the game is conducted according to the official rules and help maintain the integrity of the competition. The umpire is positioned behind the catcher and closely observes the trajectory of the pitched ball to determine whether it is within the strike zone.\n\nThe batter's primary objective is to hit the ball and achieve a base hit or an even better outcome, such as a home run. They must use their skill, timing, and strength to effectively connect with the incoming pitch, then attempt to advance as far around the bases as possible.\n\nThe catcher, positioned behind the batter and in front of the umpire, has several responsibilities. One of their main tasks is to catch the pitched ball thrown by the pitcher while also ensuring it remains within the strike zone. 
The catcher is responsible for signaling pitch types to the pitcher and defending the home plate against potential base-stealing attempts by the opposing team.\n\nIn the image, the umpire, batter, and catcher all contribute to the dynamic and exciting atmosphere of the baseball game. Each participant must be constantly aware of their unique role to contribute effectively to the game's progress." + }, + { + "from": "human", + "value": "What happens after the batter hits the ball?" + }, + { + "from": "gpt", + "value": "After the batter successfully hits the ball, they will drop the bat and attempt to run to the first base, and ultimately achieve as many bases as possible. The opposing team's fielders will try to catch the ball (resulting in an out if caught on the fly) or field the ball and throw it to the base they believe the batter or other baserunners will be going to in an attempt to get them out. If the batter makes it to first base without being tagged out or before the ball arrives at the base, they achieve a base hit. If the batter hits the ball far enough or with enough power, they may be able to achieve multiple bases or even hit a home run, allowing them to circle all the bases and score a run. Simultaneously, other baserunners on the field will also attempt to advance around the bases, increasing the team's scoring potential." + }, + { + "from": "human", + "value": "Are there any spectators watching the game in the image?" + }, + { + "from": "gpt", + "value": "Yes, there are people watching the baseball game in the image. The presence of spectators adds to the atmosphere, excitement, and pressure for the players as they compete in the match." + } + ] + }, + { + "id": "000000055202", + "image": "000000055202.jpg", + "conversations": [ + { + "from": "human", + "value": "What color is the tray in the image?\n" + }, + { + "from": "gpt", + "value": "The tray in the image is blue." + }, + { + "from": "human", + "value": "How many corn dogs are on the tray?" + }, + { + "from": "gpt", + "value": "There are three corn dogs on the tray." + }, + { + "from": "human", + "value": "What type of dipping sauce is available for the corn dogs?" + }, + { + "from": "gpt", + "value": "There are two cups of mustard available as dipping sauce for the corn dogs." + }, + { + "from": "human", + "value": "Are the corn dogs accompanied by any other food items on the tray?" + }, + { + "from": "gpt", + "value": "No, the image only shows three corn dogs on the blue tray along with the two cups of mustard. There are no other food items present." + }, + { + "from": "human", + "value": "Why are corn dogs considered a popular snack or fast food item?" + }, + { + "from": "gpt", + "value": "Corn dogs are a popular snack or fast food item for several reasons:\n\n1. Convenience: Corn dogs are easy to prepare and consume. They are often pre-cooked and can be quickly reheated in a microwave, oven, or deep fryer. Their stick makes them simple to grab, eat, and carry around, especially at events like fairs, carnivals, or sports games.\n\n2. Taste: Corn dogs consist of a sausage or hot dog coated in a thick layer of cornmeal batter and then fried until crispy. The combination of a juicy sausage and the sweet, crunchy cornmeal batter makes for a satisfying and flavorful snack.\n\n3. Affordability: Being a relatively inexpensive food choice, corn dogs are a popular choice for people looking for a budget-friendly meal option. 
The price of corn dogs usually remains low, which attracts consumers who are looking for something tasty, filling, and budget-conscious.\n\n4. Nostalgia: Corn dogs are often associated with childhood memories, making them an appealing choice for people seeking some comfort food or reminiscing about their youth when they enjoyed corn dogs at amusement parks or other fun events.\n\n5. Versatility: Corn dogs can be customized with various types of sausages and batters, as well as a wide array of dipping sauces, such as mustard or ketchup, spicy mayonnaise, or honey mustard. This allows people to enjoy corn dogs according to their taste preferences.\n\nIn conclusion, corn dogs are a popular snack or fast food item due to their convenience, taste, affordability, nostalgia, and versatility, making them an enjoyable and fun food option for a wide range of people." + } + ] + }, + { + "id": "000000475567", + "image": "000000475567.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many stuffed animals are visible in the image?" + }, + { + "from": "gpt", + "value": "There are two stuffed animals visible in the image." + }, + { + "from": "human", + "value": "Can you describe the stuffed animals in the image?" + }, + { + "from": "gpt", + "value": "There is a blue stuffed animal sitting next to a brown teddy bear. The teddy bear is wearing a pink hat and shirt. These stuffed animals are seated next to each other." + }, + { + "from": "human", + "value": "Where are the stuffed animals positioned in relation to a bed or crib?" + }, + { + "from": "gpt", + "value": "The two stuffed animals are positioned next to a bed or a baby crib, specifically at the foot of the bed." + }, + { + "from": "human", + "value": "What additional item can be found in the image along with the stuffed animals?" + }, + { + "from": "gpt", + "value": "There is an announcement or a birth certificate behind the stuffed animals in the image, adding significance to their presence." + }, + { + "from": "human", + "value": "What could be the purpose behind placing the stuffed animals next to the announcement or birth certificate?" + }, + { + "from": "gpt", + "value": "The purpose behind placing the stuffed animals next to the announcement or birth certificate could be to celebrate a recent birth or milestone, like a baby shower, a child's birthday, or a birth announcement. Stuffed animals, particularly teddy bears, are popular and recognizable symbols of childhood and innocence. Including them in a photograph with a document like an announcement, birth certificate, or baby-related event adds a personal, sentimental, and visually appealing touch that conveys a sense of warmth, love, and care, making the image more meaningful and memorable. Moreover, the stuffed animals contribute to the creation of a pleasant and inviting atmosphere in the child's room, adding a sense of comfort and companionship as the child grows." + } + ] + }, + { + "id": "000000309615", + "image": "000000309615.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat kind of vehicles can be seen on the busy city street?" + }, + { + "from": "gpt", + "value": "On the busy city street, there are cars and scooters that can be seen." + }, + { + "from": "human", + "value": "What is a significant feature of the stores on the street?" + }, + { + "from": "gpt", + "value": "A significant feature of the stores on the street is that there are many of them at this street and corner, creating a lively and bustling atmosphere." 
+ }, + { + "from": "human", + "value": "How would you describe the overall feel of this city street?" + }, + { + "from": "gpt", + "value": "The overall feel of this city street is vibrant and bustling, with many people moving about, various types of vehicles navigating the road, and numerous stores lining the street, attracting shoppers and visitors. The presence of parked cars also indicates that the area is a popular destination for both locals and tourists. This description suggests that the street is located in the heart of the city, where daily activities and commerce are thriving." + }, + { + "from": "human", + "value": "What elements in the image show that the city street is located in an Asian city?" + }, + { + "from": "gpt", + "value": "The presence of people and scooters in the street scene indicates that the city street is in an Asian city. Scooters are particularly popular in many Asian cities, as they are a convenient and relatively affordable mode of transportation, especially in places with heavy traffic and limited parking spaces. Additionally, certain architectural and cultural elements, like the design of the stores, signage, or people's clothing, might provide clues that suggest that the city is situated in Asia." + } + ] + }, + { + "id": "000000386414", + "image": "000000386414.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many cats are in the image?" + }, + { + "from": "gpt", + "value": "There are two cats in the image." + }, + { + "from": "human", + "value": "Where are the cats located in the bathroom?" + }, + { + "from": "gpt", + "value": "One cat is sitting on top of the bathroom sink, and the other cat is sitting on the counter next to the sink." + }, + { + "from": "human", + "value": "What color are the cats?" + }, + { + "from": "gpt", + "value": "The cats are brown and white in color." + }, + { + "from": "human", + "value": "What could be the possible reasons for the cats being in the bathroom?" + }, + { + "from": "gpt", + "value": "Cats can be curious creatures, and there may be several reasons for them to be in the bathroom.\n\n1. Curiosity: Cats are intrigued by new or unfamiliar environments, and they might be exploring the bathroom to satisfy their natural curiosity.\n\n2. Comfort: Cats often seek out cozy and comfortable spots to rest. The bathroom sink or counter may provide a comfortable and secure place for them to lounge, particularly if the sink is cool and smooth, which can be appealing to cats.\n\n3. Attention: Cats may be seeking attention from their owners or companions. Sitting on the bathroom counter or in the sink may be a way for them to make their presence known and be close to their favorite humans.\n\n4. Water: Some cats are drawn to water sources, such as dripping faucets or water in the sink. They might find the sound or sight of water interesting or be seeking water to drink.\n \nFinally, every cat has its personality, and their reasons for being in the bathroom can differ. The two brown and white cats in the image may have found the bathroom sink and counter to be the perfect place to hang out together or to observe what their human is up to." + } + ] + }, + { + "id": "000000074861", + "image": "000000074861.jpg", + "conversations": [ + { + "from": "human", + "value": "How many trains are visible in the image?\n" + }, + { + "from": "gpt", + "value": "There are multiple trains visible in the image parked at different platforms." + }, + { + "from": "human", + "value": "Are there people visible at the train station? 
What are they doing?" + }, + { + "from": "gpt", + "value": "Yes, there are several people visible at the train station. Some of them are near the trains, while others are outside a train, saying goodbye to each other." + }, + { + "from": "human", + "value": "What can you say about the appearance of the trains in the photo?" + }, + { + "from": "gpt", + "value": "The trains in the image are very pretty, indicating that they might have an attractive, colorful design, or perhaps feature unique or eye-catching details, adding aesthetic appeal to the train station." + }, + { + "from": "human", + "value": "Are there any other notable features of the train station?" + }, + { + "from": "gpt", + "value": "The train station features multiple platforms, which accommodate the different trains parked there. This suggests that the station is well-organized, facilitating smooth and efficient transit services for passengers." + }, + { + "from": "human", + "value": "What do train stations generally provide for passengers in terms of amenities or services?" + }, + { + "from": "gpt", + "value": "Train stations offer various amenities and services to make the passengers' experience more comfortable and convenient. Some of these amenities may include:\n\n1. Ticket offices or ticket vending machines: This is where passengers can purchase tickets for their journeys or get information about various train routes, schedules, and fares.\n\n2. Waiting areas: Train stations usually have seating areas where passengers can sit and wait for their trains to arrive. These areas often feature benches or chairs, along with lighting and announcements or display screens to inform passengers about train arrivals and departures.\n\n3. Restrooms: Train stations typically have restroom facilities available for passengers to use, ensuring their comfort and hygiene needs are met during their time at the station.\n\n4. Food and beverage options: Many train stations have cafés, restaurants, or food vendors that offer a variety of food and drink options for passengers to enjoy while they wait for their trains.\n\n5. Retail stores: Some larger train stations also feature retail stores, where passengers can shop for various items, such as clothing, books, or souvenirs.\n\n6. Information kiosks and customer service counters: These areas help passengers with any questions they might have about train schedules, ticketing, or station facilities, guiding them and ensuring a smooth travel experience.\n\n7. Luggage storage facilities: Some train stations offer luggage storage services, allowing passengers to safely store their bags while they travel, explore the local area, or wait for their train.\n\n8. Accessibility features: Train stations are usually equipped with ramps, elevators, or escalators to accommodate passengers with mobility challenges, making it easier for them to access different platforms and areas within the station.\n\nIn conclusion, train stations provide various amenities and services to cater to the diverse needs of passengers, ensuring a comfortable and convenient experience for those using the transit system." + } + ] + }, + { + "id": "000000194022", + "image": "000000194022.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the man wearing in the image?" + }, + { + "from": "gpt", + "value": "The man in the image is wearing a black and red jacket." + }, + { + "from": "human", + "value": "What is the man doing with the cow?" 
+ }, + { + "from": "gpt", + "value": "The man is standing beside the cow with his arm on it or holding onto it, demonstrating a sense of connection or ownership." + }, + { + "from": "human", + "value": "What type of animal is the man interacting with?" + }, + { + "from": "gpt", + "value": "The man is interacting with a large cow or bull." + }, + { + "from": "human", + "value": "Where is the man standing in the image?" + }, + { + "from": "gpt", + "value": "The man is standing in a barn next to a cow, suggesting a rural or agricultural setting." + }, + { + "from": "human", + "value": "Why might the man be standing next to the cow or bull in the barn?" + }, + { + "from": "gpt", + "value": "There could be several reasons for the man to be standing next to the cow or bull in the barn. Some of the potential reasons include:\n\n1. An Owner or Farmer: The man might be the owner of the cow or bull, or a farmer responsible for tending to and raising these animals. Farmers often interact closely with their livestock, monitoring their health and ensuring they are safe, well-fed, and comfortable.\n\n2. Veterinarian or Animal Health Professional: The man could be a veterinarian or animal health professional checking on the cow's well-being. In this scenario, he might be performing a routine examination, administering treatment, or providing care to the animal.\n\n3. Visitor or Tourist: The man might be a visitor or tourist at a farm or ranch. Such locations sometimes offer educational tours and hands-on experiences for visitors to learn about farming practices and interact with animals up close.\n\nIn any case, the man's interaction with the cow or bull in the barn demonstrates his interest in the animal and suggests a connection, be it professional or personal, to the animal and its environment." + } + ] + }, + { + "id": "000000059335", + "image": "000000059335.jpg", + "conversations": [ + { + "from": "human", + "value": "What are the main components of the bathroom?\n" + }, + { + "from": "gpt", + "value": "The main components of the bathroom include a stand-up shower, toilet, sink, shelves, built-in storage, a bench, and a window." + }, + { + "from": "human", + "value": "Where is the shower located in the bathroom?" + }, + { + "from": "gpt", + "value": "The shower is located in the corner of the bathroom." + }, + { + "from": "human", + "value": "What kind of storage is available in the bathroom?" + }, + { + "from": "gpt", + "value": "The bathroom has built-in storage in the form of shelves, which provides space for storing toiletries, towels, and other bathroom essentials." + }, + { + "from": "human", + "value": "Is there any seating available in the bathroom?" + }, + { + "from": "gpt", + "value": "Yes, there is a bench in the bathroom, which can be used for seating or placing items while using the bathroom." + }, + { + "from": "human", + "value": "Can you provide any tips for organizing or enhancing the storage space in a bathroom like the one in the image?" + }, + { + "from": "gpt", + "value": "To maximize storage space and enhance the organization in a bathroom like the one in the image, you can consider the following tips:\n\n1. Utilize vertical space: Install additional floating shelves or a wall-mounted cabinet above the toilet or sink to store extra towels, toiletries, and other necessities. This helps to maximize the available vertical space without causing clutter on the countertops.\n\n2. 
Opt for multi-purpose storage: Choose storage solutions that serve multiple purposes, such as a bench with storage compartments beneath it or a mirror with built-in storage behind it. This can help to create additional storage space while maintaining a clean and functional bathroom environment.\n\n3. Use containers and organizers: Keep your bathroom items neatly organized by using containers, such as clear plastic bins or decorative baskets, to store similar items together. Utilize drawer dividers, trays, or small bins to keep smaller items organized within your built-in storage or shelves.\n\n4. Add hooks: Install hooks or towel racks on the wall or behind the bathroom door to hang towels, bathrobes, or clothes. This helps to free up space on shelves and keeps your bathroom tidy.\n\n5. Keep frequently used items within reach: Arrange your bathroom items so that the most commonly used items are easily accessible. This can help streamline your daily routine and ensure a more efficient use of the available storage space.\n\nBy incorporating these tips, you can maximize the storage space and organization in your bathroom, making the space more functional and enjoyable." + } + ] + }, + { + "id": "000000410908", + "image": "000000410908.jpg", + "conversations": [ + { + "from": "human", + "value": "How many bananas can be seen on the table?\n" + }, + { + "from": "gpt", + "value": "There are two bananas sitting on top of the table." + }, + { + "from": "human", + "value": "What are the other objects on the table besides the bananas?" + }, + { + "from": "gpt", + "value": "Other objects on the table include nuts, oats, seeds, and a glass." + }, + { + "from": "human", + "value": "What is the surface of the table made of?" + }, + { + "from": "gpt", + "value": "The surface of the table is made of wood." + }, + { + "from": "human", + "value": "What could be a possible context or setting for this image?" + }, + { + "from": "gpt", + "value": "The image of the two bananas, nuts, oats, seeds, and a glass on a wooden table could be associated with a healthy meal preparation or a breakfast setting. The presence of various nutritious items suggests that the person who arranged these objects might be focusing on consuming a balanced and wholesome diet. \n\nThe arrangement of these objects might also imply that the person is in the process of making a smoothie or a healthy snack, as bananas, nuts, oats, and seeds are common ingredients in such dishes. A glass in the scene further supports the possibility of mixing these ingredients to create a smoothie. \n\nAnother possible context for this image could be a still life photography setup or an artistic composition. In this case, the objects have been carefully placed on the table to create a visually appealing scene, with a focus on natural light, shadows, textures, and colors.\n\nIn conclusion, the image of two bananas and various objects on a wooden table could be interpreted as a setting depicting healthy meal preparation, breakfast, or an artistic arrangement for photography purposes." + } + ] + }, + { + "id": "000000438698", + "image": "000000438698.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the gender of the person crossing the street in the image?\n" + }, + { + "from": "gpt", + "value": "The gender of the person crossing the street in the image is male." + }, + { + "from": "human", + "value": "What is the content of the image in terms of color?" + }, + { + "from": "gpt", + "value": "The image is black and white." 
+ }, + { + "from": "human", + "value": "What is the person carrying while crossing the street?" + }, + { + "from": "gpt", + "value": "The person is carrying a shopping bag while crossing the street." + }, + { + "from": "human", + "value": "Is there any street sign or symbol pointing at the person in the image?" + }, + { + "from": "gpt", + "value": "Yes, there is an arrow pointing at the man in the image. He is standing under an arrow street sign while carrying groceries." + }, + { + "from": "human", + "value": "Is the person crossing the street at a crosswalk or at an unmarked location?" + }, + { + "from": "gpt", + "value": "The person is crossing the street at a crosswalk." + } + ] + }, + { + "id": "000000192497", + "image": "000000192497.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat activity is the woman in the image engaged in?" + }, + { + "from": "gpt", + "value": "The woman in the image is engaged in making cookies. She's scooping cookie dough from a spoon onto a cookie sheet in a kitchen." + }, + { + "from": "human", + "value": "What type of utensils is the woman using?" + }, + { + "from": "gpt", + "value": "The woman is using two utensils - a spoon and a cookie sheet - to make cookies." + }, + { + "from": "human", + "value": "Is the woman young or old?" + }, + { + "from": "gpt", + "value": "The woman in the image is described as a young woman." + }, + { + "from": "human", + "value": "Is there any indication of the purpose of the cookies she is making?" + }, + { + "from": "gpt", + "value": "There is no direct indication in the image of the specific purpose of the cookies, but it is suggested that she might be preparing food for a party." + }, + { + "from": "human", + "value": "Why is it important to use a cookie sheet when making cookies?" + }, + { + "from": "gpt", + "value": "Using a cookie sheet is essential when making cookies for several reasons:\n\n1. Even baking: Cookie sheets ensure even distribution of heat, leading to uniform baking results for all the cookies placed on the sheet. This minimizes the risk of some cookies being overbaked or underbaked.\n\n2. Easy handling: Cookie sheets provide a flat and stable surface for placing cookie dough, making it easier to slide the sheet in and out of the oven without accidentally disturbing the dough. This ensures that the cookies maintain their shape during baking.\n\n3. Non-stick surfaces: Many cookie sheets have non-stick surfaces or can be lined with parchment paper or silicone baking mats to prevent cookies from sticking or burning. This makes cookie removal and cleanup significantly easier.\n\n4. Reusable: Cookie sheets can be used multiple times for preparing large batches of cookies or other baked goods, making them a convenient and cost-effective option for home bakers.\n\nIn summary, using a cookie sheet when making cookies is essential to achieve consistent baking results, a seamless baking process, and to provide an easy way to handle and remove cookies from the oven." + } + ] + }, + { + "id": "000000412004", + "image": "000000412004.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat activity is the person engaged in?" + }, + { + "from": "gpt", + "value": "The person is engaged in flying a kite." + }, + { + "from": "human", + "value": "What is the gender of the person flying the kite?" + }, + { + "from": "gpt", + "value": "The person flying the kite is a woman." + }, + { + "from": "human", + "value": "What is the setting of the image, and what is the weather like?" 
+ }, + { + "from": "gpt", + "value": "The setting of the image is a beautiful green field, and the weather appears to be cloudy." + }, + { + "from": "human", + "value": "How high is the kite flying in the sky?" + }, + { + "from": "gpt", + "value": "The kite is flying very high in the sky." + }, + { + "from": "human", + "value": "What color is the kite in the image?" + }, + { + "from": "gpt", + "value": "The image does not provide information about the color of the kite." + } + ] + }, + { + "id": "000000103549", + "image": "000000103549.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the man doing in the image related to the horse?" + }, + { + "from": "gpt", + "value": "The man is sitting on a fake horse, presumably participating in a parade." + }, + { + "from": "human", + "value": "What event seems to be taking place in the image?" + }, + { + "from": "gpt", + "value": "A parade seems to be taking place, with the man on the fake horse being one of the participants, and people watching the event." + }, + { + "from": "human", + "value": "What is the color of the man's clothing?" + }, + { + "from": "gpt", + "value": "The man is wearing blue clothing." + }, + { + "from": "human", + "value": "Are there any spectators seen in the image?" + }, + { + "from": "gpt", + "value": "Yes, there are people visible in the image, watching the parade and the man on the fake horse." + }, + { + "from": "human", + "value": "Why are fake horses used in parades, and how are they typically designed?" + }, + { + "from": "gpt", + "value": "Fake horses are often used in parades for various reasons, such as:\n\n1. Tradition: Some parades may have historical or cultural significance where the inclusion of horses was a common practice in the past, but using real horses is impractical for modern-day events. In this case, fake horses can serve as a symbolic representation, paying homage to that tradition.\n\n2. Safety and practicality: Real horses can be unpredictable and may pose risks to participants and spectators. Utilizing a fake horse eliminates these concerns and allows for safer and more controlled event management. Additionally, fake horses do not require the same level of care, feeding, or grooming as real horses, making them easier and more cost-effective to include in a parade.\n\n3. Art and creativity: Fake horses can be designed in various styles, colors, and materials, allowing for artistic expression and the opportunity for parade organizers to create visually striking and memorable displays.\n\nTypically, fake horses used in parades are designed to resemble real horses in form and proportions but may be stylized, decorated, or embellished to suit the event's theme, cultural heritage, or artistic vision. Common materials used for construction include fiberglass, papier-mâché, wood, and various types of fabric or soft materials.\n\nFake horses can be mounted on wheels or frames, allowing them to be easily pulled or pushed by participants, or they can be designed with leg supports and a hollow body for a person to wear and walk in, creating the illusion of riding a horse. Regardless of the method, the goal is to achieve a convincing representation of a horse while offering safety, convenience, and an engaging visual experience for the parade attendees." 
+ } + ] + }, + { + "id": "000000280599", + "image": "000000280599.jpg", + "conversations": [ + { + "from": "human", + "value": "How many animals are in the image?\n" + }, + { + "from": "gpt", + "value": "There are four animals in the image." + }, + { + "from": "human", + "value": "What kind of animals are present in the image?" + }, + { + "from": "gpt", + "value": "The animals in the image are sheep." + }, + { + "from": "human", + "value": "Are the sheep standing or laying down?" + }, + { + "from": "gpt", + "value": "The sheep are laying down, resting in the grass." + }, + { + "from": "human", + "value": "What is the condition of the field in the image?" + }, + { + "from": "gpt", + "value": "The field in the image is grass-covered, green, and well-maintained." + }, + { + "from": "human", + "value": "Is there any indication in the image about the recent activities of the sheep or their environment?" + }, + { + "from": "gpt", + "value": "Yes, the image shows freshly-shorn sheep, which indicates that they have recently been through the shearing process, where their wool has been removed for various uses such as textile production. The sheep appear to be relaxing in the green field, resting in the shade, implying that they are comfortable and content in their environment. The presence of a tree nearby provides them with some shade and adds to the natural, pastoral setting. The well-maintained grassy field further suggests that their habitat is properly cared for and suitable for the sheep's well-being." + } + ] + }, + { + "id": "000000299273", + "image": "000000299273.jpg", + "conversations": [ + { + "from": "human", + "value": "How many couches are there in the living room?\n" + }, + { + "from": "gpt", + "value": "There are two couches in the living room." + }, + { + "from": "human", + "value": "What material is the coffee table made of?" + }, + { + "from": "gpt", + "value": "The coffee table in the living room is made of glass." + }, + { + "from": "human", + "value": "What color are the sofas in the living room?" + }, + { + "from": "gpt", + "value": "The sofas in the living room are red." + }, + { + "from": "human", + "value": "Is there a television in the living room?" + }, + { + "from": "gpt", + "value": "Yes, there is a television in the living room." + }, + { + "from": "human", + "value": "What is the person in the living room doing?" + }, + { + "from": "gpt", + "value": "The person in the living room is bending down by the television." + } + ] + }, + { + "id": "000000063912", + "image": "000000063912.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the weather condition in the image?\n" + }, + { + "from": "gpt", + "value": "The weather condition in the image is rainy." + }, + { + "from": "human", + "value": "What is the person doing on the sidewalk?" + }, + { + "from": "gpt", + "value": "The person is walking on the sidewalk while holding a black umbrella to protect themselves from the rain." + }, + { + "from": "human", + "value": "What color are the person's clothing and umbrella?" + }, + { + "from": "gpt", + "value": "The person is wearing black clothing, and they are using a black umbrella." + }, + { + "from": "human", + "value": "Is the image taken from a ground perspective or a higher vantage point, such as from a window?" + }, + { + "from": "gpt", + "value": "The image is taken from a higher vantage point, such as from a window, as it overlooks the person walking in the rain on the sidewalk." 
+ }, + { + "from": "human", + "value": "How many people are visible in the image?" + }, + { + "from": "gpt", + "value": "There is a single person visible in the image, walking on the sidewalk and holding a black umbrella." + } + ] + }, + { + "id": "000000009542", + "image": "000000009542.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the woman doing in the image?\n" + }, + { + "from": "gpt", + "value": "The woman in the image is walking with an open umbrella, presumably to protect herself from the rain." + }, + { + "from": "human", + "value": "Does the woman have any other items with her?" + }, + { + "from": "gpt", + "value": "Yes, the woman is also wearing a backpack while walking with her umbrella." + }, + { + "from": "human", + "value": "Is the person walking in the rain or under clear skies?" + }, + { + "from": "gpt", + "value": "The person is walking in the rain, as they are using an open umbrella which indicates that it is probably raining." + }, + { + "from": "human", + "value": "Where is the woman walking? What is the background like?" + }, + { + "from": "gpt", + "value": "The woman is walking in front of a busy road. This suggests that she is in an urban or developed area with traffic and possibly other pedestrians around her." + } + ] + }, + { + "id": "000000359375", + "image": "000000359375.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat are the main components of the bathroom in the image?" + }, + { + "from": "gpt", + "value": "The main components of the bathroom in the image include a sink, a shower stall, and a mirror." + }, + { + "from": "human", + "value": "Is the bathroom spacious or small?" + }, + { + "from": "gpt", + "value": "The bathroom in the image is extremely small, with minimal things in it." + }, + { + "from": "human", + "value": "Is there any storage for toiletries visible in the bathroom?" + }, + { + "from": "gpt", + "value": "Yes, there is a basket of toiletries visible in the bathroom, likely meant for storage and easy access to essential items." + }, + { + "from": "human", + "value": "What color is the bathroom sink?" + }, + { + "from": "gpt", + "value": "The bathroom sink is white." + }, + { + "from": "human", + "value": "Can you discuss some challenges and advantages of having a small bathroom?" + }, + { + "from": "gpt", + "value": "Having a small bathroom can present both challenges and advantages. Here are some points to consider:\n\nChallenges:\n1. Limited space: A small bathroom provides limited space for maneuvering, making it difficult to move around comfortably or share the space with another person at the same time.\n2. Storage: Small bathrooms often have limited storage options, which can make it challenging to keep toiletries, towels, and other necessities organized and easily accessible.\n3. Clutter: With minimal space, small bathrooms can quickly become cluttered if not regularly maintained, giving a cramped and messy appearance.\n4. Functionality: Fitting all necessary fixtures and components, such as a sink, toilet, shower or bathtub, and storage, within a confined space may hinder the bathroom's overall functionality and aesthetics.\n\nAdvantages:\n1. Cost-effectiveness: Smaller bathrooms are generally less expensive to build, renovate, or maintain since they require fewer materials and lower maintenance costs for fixtures, plumbing, and electricity.\n2. Easy cleaning: Due to the limited space, small bathrooms can be easier and quicker to clean, as there are fewer surfaces to manage.\n3. 
Minimalism: A small bathroom encourages minimalism and thoughtful design choices, which can lead to a stylish and functional space without excess clutter or unnecessary accessories.\n4. Environmental impact: A smaller bathroom typically has a lower environmental impact, as it requires fewer resources and generates less waste, both during construction and ongoing upkeep.\n\nIn conclusion, small bathrooms can present challenges such as limited space, storage, and functionality, but they also offer advantages such as cost-effectiveness, easy cleaning, minimalism, and reduced environmental impact. The key to making a small bathroom work effectively is careful planning and design, so that it maximizes its potential while accommodating the occupants' needs and preferences." + } + ] + }, + { + "id": "000000434354", + "image": "000000434354.jpg", + "conversations": [ + { + "from": "human", + "value": "\nIs there a Christmas tree in the living room?" + }, + { + "from": "gpt", + "value": "Yes, there is a Christmas tree in the living room where the group of people is gathered." + }, + { + "from": "human", + "value": "What is the main activity of the group of people in the image?" + }, + { + "from": "gpt", + "value": "The main activity of the group of people in the image is opening gifts in the living room." + }, + { + "from": "human", + "value": "Are there any chairs visible in the living room where the people are gathered?" + }, + { + "from": "gpt", + "value": "Yes, there are chairs in the living room where the group of people is gathered near the Christmas tree." + }, + { + "from": "human", + "value": "Is there a television or monitor in the room?" + }, + { + "from": "gpt", + "value": "Yes, there is a giant television or monitor in the room where the group of people is gathered." + }, + { + "from": "human", + "value": "What is the significance of opening Christmas presents with friends and family?" + }, + { + "from": "gpt", + "value": "Opening Christmas presents with friends and family is a cherished tradition for many people. It carries significance in several ways:\n\n1. Strengthening bonds and relationships: Sharing moments of joy, surprise, and appreciation while opening gifts helps reinforce the emotional connections between friends and family members. This shared experience creates lasting memories and strengthens relationships.\n\n2. Expressing gratitude and thoughtfulness: Giving and receiving presents during Christmas serves as a way to show gratitude, love, and care for one another. The act of choosing, wrapping, and presenting gifts demonstrates thoughtfulness and an understanding of the recipient's tastes and preferences.\n\n3. Celebrating the spirit of the season: The exchange of gifts is closely associated with the spirit of Christmas, which emphasizes the values of generosity, kindness, and goodwill. By giving and receiving gifts, people come together to celebrate these values and create a festive atmosphere.\n\n4. Encouraging a sense of community and togetherness: The practice of opening presents together allows friends and family to set aside their daily routines and stresses to focus on spending quality time together. This fosters a sense of community, belonging, and togetherness among the group.\n\nIn conclusion, opening Christmas presents with friends and family is significant because it promotes strong bonds, gratitude, thoughtfulness, celebration of the season's spirit, and fosters a sense of community and togetherness. 
These factors contribute to making the tradition an emotionally and socially rewarding experience for those involved." + } + ] + }, + { + "id": "000000548184", + "image": "000000548184.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the small girl doing in the image?\n" + }, + { + "from": "gpt", + "value": "The small girl in the image is pulling a piece of luggage or a suitcase." + }, + { + "from": "human", + "value": "Is the girl near a car?" + }, + { + "from": "gpt", + "value": "Yes, the small girl is pulling the luggage bag near a car." + }, + { + "from": "human", + "value": "What type of surface is the girl on while pulling the suitcase?" + }, + { + "from": "gpt", + "value": "The girl is dragging the suitcase up a gravel driveway." + }, + { + "from": "human", + "value": "What can we infer about the context of the image?" + }, + { + "from": "gpt", + "value": "From the image, we can infer that the little girl and her family are likely on vacation or returning from a trip. Her action of pulling the suitcase after a car ride suggests that they have just arrived at their destination or returned home. The presence of the suitcase indicates that she and her family have been traveling, and the gravel driveway could be the location of their vacation spot, such as a cabin or a rental home. The small girl seems to be taking responsibility for her own luggage, perhaps showing independence or excitement about the vacation." + } + ] + }, + { + "id": "000000154708", + "image": "000000154708.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the main activity shown in the image?\n" + }, + { + "from": "gpt", + "value": "The main activity in the image is a man surfing on a wave in the ocean." + }, + { + "from": "human", + "value": "How large is the wave that the man is surfing?" + }, + { + "from": "gpt", + "value": "The wave that the man is surfing is moderate in size, not particularly small or extremely large." + }, + { + "from": "human", + "value": "Where is the man surfing - near the beach or in the middle of the ocean?" + }, + { + "from": "gpt", + "value": "The man is surfing at the beach, as the wave he is riding is likely to be close to shore rather than in the middle of the ocean." + }, + { + "from": "human", + "value": "What is the surfer's body position like on the surfboard?" + }, + { + "from": "gpt", + "value": "The surfer is standing on the surfboard while riding the wave, maintaining balance and control over the board." + }, + { + "from": "human", + "value": "Can you explain the basics of surfing and the skills required?" + }, + { + "from": "gpt", + "value": "Surfing is an outdoor water sport where individuals ride on the forward face of a moving wave, such as those found near beaches, using a surfboard. The primary goal of surfing is to glide across the surface of the water while maintaining balance and control. It requires a combination of physical strength, agility, balance, timing, and an understanding of how waves and ocean currents behave.\n\nSome key surfing skills and techniques include:\n\n1. Paddling: Before catching a wave, surfers need to paddle out using their arms to propel the surfboard through the water. This requires upper body strength and endurance.\n\n2. Duck diving: To reach the area where suitable waves break, surfers must pass through incoming waves. Duck diving involves pushing the nose of the surfboard underwater, diving beneath the breaking wave, and resurfacing on the other side to continue paddling out.\n\n3. 
Catching a wave: Timing is crucial in catching a wave. As a suitable wave approaches, surfers need to paddle in the same direction as the wave to gain enough speed to catch and ride it.\n\n4. Popping up: Once a surfer catches a wave, they must quickly transition from a prone position (lying on the surfboard) to a standing position. This is called popping up and requires strength, agility, and balance.\n\n5. Riding the wave: While riding the wave, surfers use their legs to shift weight and maintain balance on the surfboard. They also use their arms for support, guidance, and making adjustments to their direction and speed.\n\n6. Turning and maneuvering: While riding a wave, surfers need to perform various turns and maneuvers to stay on the wave and avoid wiping out. This requires precise control, technique, and an understanding of wave dynamics.\n\nSurfing also involves knowledge about ocean safety, including rip currents, tides, and local hazards, such as reefs or rocks. Moreover, surfing etiquette and understanding the unwritten rules and priorities on the water are essential to ensure a positive experience for everyone in the lineup." + } + ] + }, + { + "id": "000000242008", + "image": "000000242008.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the relationship between the man and the child in the image?\n" + }, + { + "from": "gpt", + "value": "The man in the image is the father of the small child." + }, + { + "from": "human", + "value": "Where is the child positioned in relation to the motorcycle?" + }, + { + "from": "gpt", + "value": "The child is sitting on the motorcycle." + }, + { + "from": "human", + "value": "What is the father doing in the image?" + }, + { + "from": "gpt", + "value": "The father is standing next to the motorcycle, posing with his small son who is sitting on the bike." + }, + { + "from": "human", + "value": "Is the motorcycle parked or in motion?" + }, + { + "from": "gpt", + "value": "The motorcycle is parked." + }, + { + "from": "human", + "value": "Can you discuss the importance of safety precautions when children are around motorcycles?" + }, + { + "from": "gpt", + "value": "Safety is a paramount concern when children are around motorcycles, as these vehicles have the potential to pose significant risks if necessary precautions are not taken. It is essential for parents and guardians to be aware of safety guidelines and ensure that children are properly educated about responsible behavior around motorcycles. Below are a few safety precautions to consider:\n\n1. Supervision: Children should always be supervised when around motorcycles, especially if they are allowed to sit on or touch the bike. Young children might be tempted to play with or operate the motorcycle, which could lead to accidents or injuries.\n\n2. Safety gear: When riding or sitting on a motorcycle, even for a short distance, it is essential for both the child and the adult to wear appropriate safety gear. This includes helmets, gloves, protective clothing, and sturdy footwear. Helmets can reduce the risk of head injuries, saving lives in case of accidents.\n\n3. Educate children: Teaching children about motorcycle safety from a young age can help them develop an understanding of responsible behavior around motorbikes. Discussing topics such as road safety, the importance of wearing protective gear, and how to approach a running motorcycle are essential aspects of motorcycle safety education.\n\n4. 
Child-sized motorcycle equipment: If a child is allowed to ride a motorcycle, it is crucial to provide them with child-sized safety gear and equipment, such as smaller helmets, gloves, and boots. Properly fitting safety gear provides better protection in case of an accident.\n\n5. Safe environment: Children should only be allowed to ride or touch motorcycles in safe, controlled environments, such as private properties or designated safe zones. Avoid exposing children to potentially hazardous situations, such as busy roads, uneven terrain, or slippery surfaces.\n\nIn conclusion, safety precautions are vital when children are around motorcycles to minimize the risk of accidents and injuries. By being vigilant, ensuring proper supervision, providing appropriate safety gear, educating children about motorcycle safety, and ensuring a safe environment, parents and guardians can create a safer space for children to interact with motorcycles." + } + ] + }, + { + "id": "000000097296", + "image": "000000097296.jpg", + "conversations": [ + { + "from": "human", + "value": "What action is the baseball player performing in the image?\n" + }, + { + "from": "gpt", + "value": "The baseball player is swinging at a pitch, about to hit the ball thrown by the pitcher with a baseball bat." + }, + { + "from": "human", + "value": "What object is the baseball player using to hit the ball?" + }, + { + "from": "gpt", + "value": "The baseball player is using a baseball bat to hit the ball." + }, + { + "from": "human", + "value": "Is the baseball player hitting the ball during a practice session or a game?" + }, + { + "from": "gpt", + "value": "The image shows the baseball player hitting the ball during a game." + }, + { + "from": "human", + "value": "Can you describe the moment captured in the image?" + }, + { + "from": "gpt", + "value": "The moment captured in the image is right before the batter connects the bat and the ball. This is a critical instant in the baseball game, as the batter's skill and timing in making contact with the ball can determine the outcome of the play. It could result in a home run, a base hit, a strike, or an out depending on the batter's swing and the trajectory of the ball after being hit." + }, + { + "from": "human", + "value": "How important is the batter's skill and timing in hitting the ball successfully?" + }, + { + "from": "gpt", + "value": "The batter's skill and timing play a crucial role in successfully hitting the ball during a baseball game. The ability to hit a fastball, curveball, or other types of pitches requires excellent hand-eye coordination, strength, and proper technique. \n\nThe timing aspect is critical because the batter needs to accurately judge the speed, direction, and potential movement of the ball in a split second. Adjusting the swing accordingly helps the batter make solid contact with the ball and increase the chances of sending it deep into the field or even hitting a home run. \n\nA successful batter must have keen reflexes, an understanding of the pitcher's throwing patterns, and the ability to make quick adjustments during the swing. Together, these elements help the batter improve their chances of connecting the bat with the ball effectively and achieving desirable results, such as scoring runs or advancing teammates around the bases. On the other hand, any shortcomings in skill or timing may lead to unfavorable outcomes, such as strikes and outs, which hamper their team's overall performance." 
+ } + ] + }, + { + "id": "000000078562", + "image": "000000078562.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat color is the cat in the image?" + }, + { + "from": "gpt", + "value": "The cat in the image is white." + }, + { + "from": "human", + "value": "Where is the cat positioned in relation to the keyboard?" + }, + { + "from": "gpt", + "value": "The white cat is laying next to the keyboard on a desk." + }, + { + "from": "human", + "value": "What type of surface is the cat laying on?" + }, + { + "from": "gpt", + "value": "The cat is laying on a wood desk." + }, + { + "from": "human", + "value": "Is the cat sitting or laying down?" + }, + { + "from": "gpt", + "value": "The cat is laying down." + }, + { + "from": "human", + "value": "What is the likely reason for the cat to be next to the keyboard?" + }, + { + "from": "gpt", + "value": "While it's difficult to determine the exact reason for the cat's presence next to the keyboard, it's quite common for cats to seek the company of their owners or to be attracted to warm or comfortable surfaces. The cat may be lying next to the keyboard because it is near its owner who is likely using the computer, or it may simply find the desk's surface to be a comfortable spot to rest. Additionally, cats are often intrigued by the movement of objects like computer mice or the sound of keyboard presses, which could also pique their interest and cause them to lay beside the keyboard." + } + ] + }, + { + "id": "000000152764", + "image": "000000152764.jpg", + "conversations": [ + { + "from": "human", + "value": "How are the motorcycles arranged in the image?\n" + }, + { + "from": "gpt", + "value": "The motorcycles are arranged in a row, parked on the street." + }, + { + "from": "human", + "value": "What color are the motorcycles in the image?" + }, + { + "from": "gpt", + "value": "The motorcycles in the image are black." + }, + { + "from": "human", + "value": "What are the people doing near the motorcycles?" + }, + { + "from": "gpt", + "value": "The people, likely the motorcycle riders or owners, are congregating or milling about in the street near the parked motorcycles, possibly socializing, discussing their bikes, or preparing for a ride." + }, + { + "from": "human", + "value": "Are the motorcycles modern or vintage in style?" + }, + { + "from": "gpt", + "value": "The motorcycles in the image are described as vintage, which means they are older models or have a classic design." + }, + { + "from": "human", + "value": "What do the owners of the motorcycles appear to be wearing?" + }, + { + "from": "gpt", + "value": "The owners of the motorcycles appear to be wearing leather attire, which is often associated with motorcycle culture and serves as a protective gear for riders." + } + ] + }, + { + "id": "000000451597", + "image": "000000451597.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat items are present in the bowl of food?" + }, + { + "from": "gpt", + "value": "The bowl of food contains granola, bananas, strawberries, and raspberries. There is also a container of yogurt next to the bowl." + }, + { + "from": "human", + "value": "What type of dishware is used to hold the fruit mix and granola?" + }, + { + "from": "gpt", + "value": "The dishware used to hold the fruit mix and granola is a white plate, and the fruit mix and granola are served in a bowl placed on the plate." + }, + { + "from": "human", + "value": "Is there any utensil included in the image?" 
+ }, + { + "from": "gpt", + "value": "Yes, there is a spoon included with the bowl of granola and fruit in the image." + }, + { + "from": "human", + "value": "What type of meal would this bowl of granola and fruit be suitable for?" + }, + { + "from": "gpt", + "value": "This bowl of granola and fruit would be suitable for a healthy and nutritious breakfast or snack. Granola is rich in fiber, and the addition of the mixed fruits — including bananas, strawberries, and raspberries — provides vitamins and antioxidants. The yogurt provides a good source of protein, calcium, and probiotics, which promote a healthy gut. Combined, these ingredients make for a well-rounded and delicious meal that can be enjoyed in the morning or as a wholesome snack during the day." + } + ] + }, + { + "id": "000000503781", + "image": "000000503781.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the bird doing in the image?\n" + }, + { + "from": "gpt", + "value": "The bird is perched on a tree branch, sitting and resting among the green leaves surrounding it." + }, + { + "from": "human", + "value": "What is the visibility of the bird in the image?" + }, + { + "from": "gpt", + "value": "The bird is visible but appears to be somewhat obscured by blurry leaves around it, which creates a somewhat ruffled appearance." + }, + { + "from": "human", + "value": "Can you describe the general atmosphere or setting of the image?" + }, + { + "from": "gpt", + "value": "The image displays a serene and peaceful atmosphere characterized by the bird quietly resting on a branch. With the green leaves surrounding the bird, it suggests that the setting is likely in a natural environment, such as a park or a forest. The overall scene evokes a sense of tranquility and harmony between the bird and its surroundings." + }, + { + "from": "human", + "value": "Why might the bird be resting on the branch?" + }, + { + "from": "gpt", + "value": "Birds typically perch and rest on branches for various reasons. Some possible explanations for the bird resting on the branch in this image could be:\n\n1. Safety and predator avoidance: Birds often rest up high on branches to avoid predators and observe their surroundings from a more secure vantage point.\n\n2. Conserving energy: After flying, foraging, or other activities, birds may perch on branches to rest and conserve energy for their next flight or activity. Resting also helps birds maintain their body temperature and recover from exhaustion.\n\n3. Grooming: Perching on a branch provides birds the opportunity to groom their feathers, which is an essential part of maintaining their overall health and flight capabilities.\n\n4. Courtship and social interactions: Birds may also rest on branches as part of their courtship rituals or to engage in social interactions with other birds.\n\nIn summary, the bird in the image might be resting on the branch for safety, energy conservation, grooming, or social interactions. The serene and peaceful feeling conveyed by the scene emphasizes the bird's state of relaxation and contentment within its natural environment." + } + ] + }, + { + "id": "000000427548", + "image": "000000427548.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many computer monitors are on the desk?" + }, + { + "from": "gpt", + "value": "There are four computer monitors on the desk." + }, + { + "from": "human", + "value": "Is there a computer tower on the desk as well?" 
+ }, + { + "from": "gpt", + "value": "Yes, there is a computer tower sitting together with the four monitors on the desk." + }, + { + "from": "human", + "value": "Do the computer monitors have the same screensaver?" + }, + { + "from": "gpt", + "value": "Yes, the multiple computer monitors on the desk have the same screensaver." + }, + { + "from": "human", + "value": "What type of environment does this desk setup suggest?" + }, + { + "from": "gpt", + "value": "The desk setup with four computer monitors and a tower suggests a work or office environment. A multi-monitor setup like this is often used by professionals who require extensive screen real estate for their tasks, such as graphic designers, programmers, video editors, or financial traders. Having multiple screens allows them to have multiple applications, documents, or projects open simultaneously, making it easier to multitask, navigate between applications, and be more efficient. The presence of the same screensaver on all monitors implies that the setup may be part of an organized workspace, possibly within a corporate setting where uniformity is maintained. Overall, it appears to be a productive and efficient workstation designed to handle complex or demanding tasks." + } + ] + }, + { + "id": "000000344187", + "image": "000000344187.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat sport are the people playing in the image?" + }, + { + "from": "gpt", + "value": "The people in the image are playing baseball." + }, + { + "from": "human", + "value": "What is the batter attempting to do in the image?" + }, + { + "from": "gpt", + "value": "The batter is attempting to hit the baseball with the bat during his swing." + }, + { + "from": "human", + "value": "Is the batter successful in hitting the ball?" + }, + { + "from": "gpt", + "value": "No, the batter misses the ball during his swing." + }, + { + "from": "human", + "value": "What is the role of the person standing behind the batter with the catcher's mitt?" + }, + { + "from": "gpt", + "value": "The person standing behind the batter with the catcher's mitt is playing the role of the catcher in the game of baseball. The catcher's primary responsibility is to receive and catch the pitches thrown by the pitcher towards the home plate. They are also responsible for coordinating the pitcher's strategy and keeping an eye on base runners. In the image, the catcher is prepared and positioned to catch the ball if the batter misses the swing." + }, + { + "from": "human", + "value": "What is the significance of a batter hitting the ball well during a game of baseball?" + }, + { + "from": "gpt", + "value": "In the game of baseball, the batter's main objective is to hit the ball and make contact with it in such a way that they are able to reach one or more of the bases safely, potentially scoring runs for their team. Hitting the ball well is essential for the following reasons:\n\n1. Advancing bases: Good contact with the ball allows the batter to advance to the first base or even beyond, based on the strength and angle of the hit. Better hits increase the odds of reaching multiple bases or scoring a run directly.\n \n2. Driving in runs: When there are runners on base, a well-timed and powerful hit can drive in the base runners to score runs for the team, thereby improving the team's overall score and chances of winning.\n\n3. 
Challenging the fielders: When a batter hits the ball well, they create a challenging situation for the opposing team's fielders, forcing them to react quickly, adjust their positions, or make difficult plays to prevent base advances or runs.\n\n4. Sequential hitting: Consistently hitting the ball well could lead to a string of successful plays or “rallies” for the batting team, increasing momentum and boosting the team's morale.\n\nIn summary, hitting the baseball well is crucial for advancing bases, driving in runs, challenging the opposing team's fielders, and maintaining positive momentum throughout the game. Therefore, a batter's ability to consistently make good contact with the ball is a valuable skill in the game of baseball." + } + ] + }, + { + "id": "000000444367", + "image": "000000444367.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat type of train is depicted in the image?" + }, + { + "from": "gpt", + "value": "The image shows a commuter train, which is designed for transporting passengers over short to medium distances within a metropolitan area or between nearby cities." + }, + { + "from": "human", + "value": "Can you describe the colors of the train in the image?" + }, + { + "from": "gpt", + "value": "The train in the image is red and black." + }, + { + "from": "human", + "value": "What is the current status of the train in relation to the platform?" + }, + { + "from": "gpt", + "value": "The train is leaving the station, speeding past a subway platform where passengers walk and wait." + }, + { + "from": "human", + "value": "How does the platform appear in terms of cleanliness?" + }, + { + "from": "gpt", + "value": "The platform in the image is clean, indicating that it is well-maintained and cared for by the transportation authorities." + }, + { + "from": "human", + "value": "What can you infer about the efficiency of the public transportation system from the image?" + }, + { + "from": "gpt", + "value": "Based on the image, the public transportation system seems to be efficient, as evidenced by the clean subway platform and the speeding train leaving the station. The clean platform suggests that the transportation authorities are diligent in maintaining the station, which can directly impact the overall quality and satisfaction of the riders. The train's timely departure also implies that the rail system is likely running on a schedule, allowing passengers to plan their commutes accordingly.\n\nEfficient public transportation is essential for a city, as it can alleviate traffic congestion, reduce pollution, and help residents travel more conveniently from one place to another. Furthermore, a well-organized and clean transportation system can contribute to a positive experience for the passengers and encourage more people to use public transit, ultimately leading to a more sustainable, eco-friendly city. Therefore, the image of a commuter train leaving a clean subway platform reflects a well-managed public transportation system that is beneficial to both the residents and the city itself." + } + ] + }, + { + "id": "000000114697", + "image": "000000114697.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the activity being depicted in the image?" + }, + { + "from": "gpt", + "value": "The activity depicted in the image is a young boy playing baseball in a green field. He is swinging a baseball bat, getting ready to hit a baseball at home plate." 
+ }, + { + "from": "human", + "value": "What is the color of the field where the boy is playing baseball?" + }, + { + "from": "gpt", + "value": "The color of the field where the boy is playing baseball is green." + }, + { + "from": "human", + "value": "Is the baseball player holding anything, and if so, what is he holding?" + }, + { + "from": "gpt", + "value": "Yes, the baseball player is holding a baseball bat." + }, + { + "from": "human", + "value": "What stage of hitting the baseball is the young boy at in the image?" + }, + { + "from": "gpt", + "value": "In the image, the young boy is just getting ready to hit the baseball. This suggests that he is at the stage where he is swinging (or has just begun to swing) the bat, preparing to make contact with the ball." + }, + { + "from": "human", + "value": "What factors contribute to a successful baseball hit?" + }, + { + "from": "gpt", + "value": "Several factors contribute to a successful baseball hit, and mastering these factors can improve a player's batting performance:\n\n1. Proper stance and body position: A good stance involves keeping the feet shoulder-width apart and knees slightly bent. This allows for better balance, mobility, and power generation.\n\n2. Grip and hand positioning: How a player grips the bat and positions their hands can significantly impact their swing. A firm yet relaxed grip and appropriate hand positioning can help the hitter maintain control and generate optimal bat speed.\n\n3. Bat speed: An essential factor in hitting the ball hard and far, bat speed is generated through a combination of body mechanics, hip rotation, and arm strength.\n\n4. Timing: Excellent timing is crucial in hitting a baseball successfully. The player needs to begin their swing at the right moment to make solid contact with the ball.\n\n5. Swing path: An efficient and consistent swing path is vital for maximizing the chances of making contact with the ball. The hitter must keep their head down and eyes on the ball, following it all the way to the point of contact.\n\n6. Point of contact: Making contact with the ball at the optimal point in the swing is essential. The ideal point of contact can vary depending on the pitch location, swing style, and the desired outcome (such as hitting for power or contact).\n\n7. Follow-through: Once the ball is hit, the player should follow through with their swing, making sure to complete the rotation of the upper body, hips, and arms. This follow-through helps generate power and maintain balance.\n\nIn summary, a successful baseball hit requires a combination of proper stance, grip, bat speed, timing, swing path, point of contact, and follow-through. All these factors work together to improve a player's batting performance and increase their chances of hitting the ball hard and accurately." + } + ] + }, + { + "id": "000000402980", + "image": "000000402980.jpg", + "conversations": [ + { + "from": "human", + "value": "What color and type of vehicle is featured in the image?\n" + }, + { + "from": "gpt", + "value": "The image features an older model red pickup truck." + }, + { + "from": "human", + "value": "What is the condition of the truck's load bed?" + }, + { + "from": "gpt", + "value": "The truck's load bed is loaded with various items, including a large blue object." + }, + { + "from": "human", + "value": "What is the location of the parked truck?" + }, + { + "from": "gpt", + "value": "The red pickup truck is parked in the gravel, in a driveway, and positioned next to a fence." 
+ }, + { + "from": "human", + "value": "Can you describe the size of the red truck?" + }, + { + "from": "gpt", + "value": "The red truck is a small-sized pickup, although the exact dimensions cannot be gleaned from the image, it is visibly smaller than a full-sized pickup truck." + }, + { + "from": "human", + "value": "Is it possible to identify the large blue object in the truck's load bed from the image?" + }, + { + "from": "gpt", + "value": "Based on the image's description, it is challenging to confidently identify the large blue object in the truck's load bed, as detailed information about its shape, material, or purpose is not provided." + } + ] + }, + { + "id": "000000023951", + "image": "000000023951.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the main activity of the person in the image?" + }, + { + "from": "gpt", + "value": "The main activity of the person in the image is flying a kite." + }, + { + "from": "human", + "value": "What colors are present on the kite being flown in the image?" + }, + { + "from": "gpt", + "value": "The kite being flown in the image has blue and red colors." + }, + { + "from": "human", + "value": "What are the weather conditions in the image?" + }, + { + "from": "gpt", + "value": "The weather conditions in the image are overcast, with a cloudy sky in the background." + }, + { + "from": "human", + "value": "Can you describe the setting where the person is flying the kite?" + }, + { + "from": "gpt", + "value": "The person is flying the kite in an open field with a sky in the background. The space is likely chosen for its abundance of open space and minimal obstructions, making it an ideal location for kite flying." + }, + { + "from": "human", + "value": "What factors can impact the kite flying experience?" + }, + { + "from": "gpt", + "value": "Several factors can impact the kite flying experience, such as:\n\n1. Wind conditions: Sufficient and steady wind is essential for successful kite flying. If the wind is too weak or inconsistent, the kite may struggle to stay aloft. Conversely, if the wind is too strong, the kite might become difficult to control or even become damaged.\n\n2. Kite design and materials: The construction, materials, and design of the kite can significantly influence its flight characteristics. Lightweight materials and well-balanced designs can provide better lift and stability, while poorly constructed kites may have difficulty maintaining flight or responding to the user's inputs.\n\n3. Skill and experience of the kite flyer: A skilled and experienced kite flyer can better control the kite, adapting to changing wind conditions and maneuvering the kite more effectively. Beginners may struggle with managing the kite's position and movements, especially in challenging conditions.\n\n4. Obstructions and terrain: Flying a kite in an area with obstructions such as trees, buildings, or power lines can be dangerous and hinder the kite's movement. Ideally, kite flyers should choose open and obstruction-free areas to fly their kites, such as fields or parks.\n\n5. Weather conditions: In addition to wind, other weather conditions can impact the kite flying experience. 
Overcast skies or rain can make kite flying less enjoyable and potentially more challenging, while sunny and clear days are typically preferred for outdoor activities such as kite flying.\n\nIn conclusion, factors such as wind conditions, kite design, the skill of the kite flyer, the presence of obstructions, and weather conditions can all impact the kite flying experience, either enhancing or hindering the activity." + } + ] + }, + { + "id": "000000136979", + "image": "000000136979.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the main subject of the image?" + }, + { + "from": "gpt", + "value": "The main subject of the image is a bicycle rider who is wearing a backpack and helmet while riding on a busy city street with a public transit bus nearby." + }, + { + "from": "human", + "value": "Is the photo in color or black and white?" + }, + { + "from": "gpt", + "value": "The photo is in black and white." + }, + { + "from": "human", + "value": "What are the other elements present in the image besides the bicycle rider and the bus?" + }, + { + "from": "gpt", + "value": "In addition to the bicycle rider and the bus, the image also features a beautiful woman waiting to cross the street and people nearby, indicating a lively urban environment." + }, + { + "from": "human", + "value": "What safety measures is the bicycle rider taking?" + }, + { + "from": "gpt", + "value": "The bicycle rider is taking safety measures by wearing a helmet while riding, which can protect their head in case of an accident or fall." + }, + { + "from": "human", + "value": "How does the presence of the public transit bus relate to the overall theme of the image?" + }, + { + "from": "gpt", + "value": "The presence of the public transit bus contributes to the overall theme of the image by showcasing a bustling city street with various modes of transportation. The bus, along with the bicycle rider and pedestrians, helps create a picture of a diverse urban landscape where people are engaged in different activities, such as commuting, leisure, or work. This scene highlights the dynamics of a modern city, where public transit, bicycles, and pedestrians coexist, showcasing the importance of urban planning and infrastructure that accommodates multiple forms of mobility for a sustainable and vibrant city life." + } + ] + }, + { + "id": "000000226354", + "image": "000000226354.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the person doing in the image?\n" + }, + { + "from": "gpt", + "value": "The person in the image is riding a skateboard, performing a trick on a yellow ramp or street sidewalk." + } + ] + }, + { + "id": "000000027424", + "image": "000000027424.jpg", + "conversations": [ + { + "from": "human", + "value": "How many people are in the image?\n" + }, + { + "from": "gpt", + "value": "There are three people in the image - a man and two women." + }, + { + "from": "human", + "value": "What is the activity that the people are engaged in?" + }, + { + "from": "gpt", + "value": "The people in the image are engaged in playing a video game, with one person holding a Wii remote, possibly participating in a motion-controlled game." + }, + { + "from": "human", + "value": "What is the setting of the image?" + }, + { + "from": "gpt", + "value": "The setting of the image is a living room." + }, + { + "from": "human", + "value": "What is the position of the women relative to the man holding the remote?" 
+ }, + { + "from": "gpt", + "value": "The two women are standing near the man who is holding the remote. One of the women is raising her hand." + }, + { + "from": "human", + "value": "What kind of game can be played with a Wii remote? Can you give some examples?" + }, + { + "from": "gpt", + "value": "A Wii remote, also known as a WiiMote, is a unique controller used with the Nintendo Wii gaming console. It enables users to play motion-controlled games by incorporating an accelerometer, infrared detection, and other sensors to detect the player's movements and translate them into in-game actions. Some popular games that can be played with a Wii remote include:\n\n1. Wii Sports: This game features multiple sports simulations like tennis, bowling, baseball, golf, and boxing. Players can swing the Wii remote like a tennis racket or roll it like a bowling ball to control their character's movement within the game.\n\n2. Mario Kart Wii: In this racing game, players can use the Wii remote in combination with the Wii Wheel accessory to steer their characters and navigate through various racetracks.\n\n3. The Legend of Zelda: Skyward Sword: This action-adventure game involves controlling the main character, Link, through combat and puzzle-solving challenges. Players can swing the Wii remote like a sword and use other motion gestures for different in-game actions.\n\n4. Just Dance: A popular dance game series, where players follow the on-screen character's choreography and move the Wii remote in sync with the music and dance moves.\n\nThese examples demonstrate the versatility and interactivity that the Wii remote brings to gaming on the Nintendo Wii console. The motion controls and ability to engage players in a more immersive and active gaming experience make it popular among casual gamers and families alike." + } + ] + }, + { + "id": "000000272671", + "image": "000000272671.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the individual in the image doing?\n" + }, + { + "from": "gpt", + "value": "The individual in the image is performing a skateboarding trick or stunt, likely at a skate park." + }, + { + "from": "human", + "value": "Is the skateboarder in the air during this trick?" + }, + { + "from": "gpt", + "value": "Yes, the skateboarder is in the air while performing this trick, which may involve flipping the skateboard." + }, + { + "from": "human", + "value": "What is the gender and approximate age of the skateboarder?" + }, + { + "from": "gpt", + "value": "The skateboarder is a young boy or man, but the exact age cannot be determined from the image. Nonetheless, the individual is relatively young." + }, + { + "from": "human", + "value": "Can you provide some examples of popular skateboarding tricks?" + }, + { + "from": "gpt", + "value": "Certainly! Skateboarding tricks are numerous and come in various levels of difficulty. Some popular skateboarding tricks include:\n\n1. Ollie: This is a basic trick where the skateboarder jumps with the skateboard into the air without using their hands. The ollie is one of the first tricks a novice skateboarder will learn, as it serves as the foundation for many other tricks.\n\n2. Kickflip: In this trick, the skateboarder does an ollie and then uses their front foot to kick the board, causing it to spin in a 360-degree horizontal motion before landing back on the board.\n\n3. 
Heelflip: Similar to the kickflip, a heelflip involves the skateboarder doing an ollie and then using their back foot to kick the board, causing it to spin in a 360-degree horizontal motion, but in the opposite direction from a kickflip.\n\n4. Pop Shove-it: This trick involves the skateboarder doing an ollie, and then using their back foot to shove the board into a 180-degree or 360-degree spin, without flipping the skateboard.\n\n5. Grind: In this trick, the skateboarder slides along a surface, such as a rail or the edge of a ramp, using the trucks (the metal parts that connect the wheels to the board) instead of the wheels.\n\n6. Manual: Similar to a \"wheelie\" on a bicycle, a manual involves the skateboarder balancing on the back two wheels while rolling, usually for a certain distance.\n\n7. 50-50 Grind: This is a type of grind where both the back and front trucks of the skateboard slide across a surface.\n\nThese are just a few examples of popular skateboarding tricks, but there are many more advanced and creative maneuvers in the world of skateboarding that serve as a form of expression and artistry for skateboarders." + } + ] + }, + { + "id": "000000265432", + "image": "000000265432.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat types of items can be seen on the bathroom counter?" + }, + { + "from": "gpt", + "value": "The bathroom counter is covered with personal care and beauty products, such as hair products and various other supplies related to hygiene and grooming." + }, + { + "from": "human", + "value": "Are there any open drawers or cupboards in the bathroom?" + }, + { + "from": "gpt", + "value": "Yes, the bathroom has open drawers and cupboards, possibly revealing more personal care items or storage spaces." + }, + { + "from": "human", + "value": "How would you describe the overall state of the bathroom?" + }, + { + "from": "gpt", + "value": "The bathroom appears to be cluttered, with a large number of beauty care products on the counter, as well as open drawers and cupboards. This may indicate that the user has many personal care items, or that they have not taken the time to organize and tidy up the space." + }, + { + "from": "human", + "value": "What suggestions could you give to improve the organization of the bathroom?" + }, + { + "from": "gpt", + "value": "To improve the organization of the cluttered bathroom, one could consider the following suggestions:\n\n1. Assess and declutter: Start by going through all the personal care items, and remove any products that are expired, unused, or no longer needed. This will help eliminate excess clutter and make the bathroom more manageable.\n\n2. Utilize storage solutions: Invest in storage solutions such as drawer organizers, plastic bins, or small baskets to keep items categorized and easily accessible. Assign a specific space for each type of product, such as hair care, skincare, or dental hygiene items.\n\n3. Close drawers and cupboards: Make it a habit to close drawers and cupboards once you have retrieved or put away an item. Not only does this keep the bathroom looking tidier, but it also protects items from dust and moisture.\n\n4. Implement a regular cleaning schedule: To maintain the organization of the bathroom, establish a regular cleaning schedule where you tidy up the counter, wipe down surfaces, and reorganize any misplaced items.\n\n5. Use vertical space: If the bathroom counter remains crowded, consider installing wall shelves or cabinets to create additional storage space. 
This can help to clear some items off the counter and make the bathroom appear more organized.\n\nBy implementing these suggestions, the cluttered bathroom will likely become a more organized, efficient, and visually appealing space." + } + ] + }, + { + "id": "000000154815", + "image": "000000154815.jpg", + "conversations": [ + { + "from": "human", + "value": "\nHow many sheep are visible in the image?" + }, + { + "from": "gpt", + "value": "There are two sheep visible in the image." + }, + { + "from": "human", + "value": "What are the sheep doing in the image?" + }, + { + "from": "gpt", + "value": "The two sheep are standing in the pasture and looking through the fence." + }, + { + "from": "human", + "value": "Is the photograph taken from inside or outside the fenced area?" + }, + { + "from": "gpt", + "value": "The photograph is taken from behind the fence, looking at the sheep through the fence." + }, + { + "from": "human", + "value": "What kind of fence is present in the image?" + }, + { + "from": "gpt", + "value": "There is a barbed wire fence in the image." + }, + { + "from": "human", + "value": "Is the sheep stationary or moving in the image?" + }, + { + "from": "gpt", + "value": "The sheep are stationary, standing and looking through the fence. There is no indication in the image that the small sheep is running away from the fence." + } + ] + }, + { + "id": "000000271198", + "image": "000000271198.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the main feature of the street sign in the image?" + }, + { + "from": "gpt", + "value": "The main feature of the street sign in the image is that it has multiple colorful arrows pointing in various directions, indicating different destinations or routes one could take." + }, + { + "from": "human", + "value": "Are the destinations on the street sign real or make-believe?" + }, + { + "from": "gpt", + "value": "The destinations on the street sign are make-believe, adding a whimsical touch to the sign's appearance." + }, + { + "from": "human", + "value": "How would you describe the appearance of the street sign?" + }, + { + "from": "gpt", + "value": "The street sign has a colorful and playful appearance, featuring a wide variety of signs and arrows pointing in many different directions. Its make-believe destinations make it visually interesting and unique, drawing attention to its vibrant and imaginative design." + }, + { + "from": "human", + "value": "Where do you think a street sign like this would typically be found or used?" + }, + { + "from": "gpt", + "value": "A street sign like this, with colorful arrows and make-believe destinations, would likely be found in a creative or playful setting such as an amusement park, an art installation, or a themed event. It could also be seen in a neighborhood or community with a quirky or eccentric atmosphere, where residents appreciate imaginative and unconventional public art. The sign might serve as a decorative and conversation-provoking element in such environments, rather than providing practical navigational information. However, it is important to note that the specific location of the sign in the image cannot be determined from the details provided." + } + ] + }, + { + "id": "000000062604", + "image": "000000062604.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is the primary subject of the image?" + }, + { + "from": "gpt", + "value": "The primary subject of the image is a large church cathedral with stained glass windows and rows of wooden pews." 
+ }, + { + "from": "human", + "value": "What type of windows does the cathedral have?" + }, + { + "from": "gpt", + "value": "The cathedral features stained glass windows, adding to the beauty and ambience of the interior." + }, + { + "from": "human", + "value": "Are there any notable features or details within the cathedral?" + }, + { + "from": "gpt", + "value": "Some notable features within the cathedral include rows of wooden pews, a walkway between the rows, and lighting throughout the space. These elements contribute to the grandeur and solemn atmosphere of the cathedral." + }, + { + "from": "human", + "value": "How does the interior lighting affect the cathedral's appearance?" + }, + { + "from": "gpt", + "value": "The interior lighting plays an essential role in enhancing the cathedral's ambience and highlighting its architectural details. The light illuminates the space, making the wooden pews, walkways, and other features more visible and inviting. Additionally, the light enhances the beauty of the stained glass windows by shining through the colored panes, creating vibrant patterns on the interior surfaces and adding depth to the overall space. The strategic use of lighting helps to create a serene and contemplative atmosphere, which is fitting for a place of worship." + }, + { + "from": "human", + "value": "Can you tell me more about the role of stained glass windows in cathedrals?" + }, + { + "from": "gpt", + "value": "Stained glass windows have played a significant role in the architecture and design of cathedrals for centuries. In addition to their aesthetic appeal, these windows served various practical, cultural, and religious purposes.\n\n1. Aesthetics: Stained glass windows are known for their intricate designs and vibrant colors, which add beauty and elegance to the cathedral interiors. When sunlight passes through the colored panes, it creates a captivating, otherworldly display of light and shadow inside the building.\n\n2. Storytelling: In the Middle Ages, when the majority of the population was illiterate, stained glass windows were used as a visual medium to convey religious stories and teachings. The windows often depicted Biblical narrative, scenes from the lives of saints, or moral lessons, allowing worshipers to learn about their faith through images rather than text.\n\n3. Symbolism: The themes, images, and colors chosen for stained glass windows were rich in religious symbolism. For example, specific colors could represent virtues, vices, or divine attributes, while various scenes and figures illustrated theological concepts or revered individuals.\n\n4. Light: Stained glass windows contributed to the spiritual experience of the worshipers by creating a unique atmosphere within the cathedral. The windows allowed natural light to enter the space, yet the colors and designs subtly altered the illumination, invoking a sense of transcendent beauty and divine presence.\n\nIn conclusion, stained glass windows were not only a form of artistic expression and decoration in cathedrals but also served essential functions in guiding and inspiring the faith of those who worshiped within their walls. Through captivating aesthetics, storytelling, symbolism, and manipulation of light, these windows became an integral part of the religious and cultural heritage of many historic cathedrals." 
+ } + ] + }, + { + "id": "000000475932", + "image": "000000475932.jpg", + "conversations": [ + { + "from": "human", + "value": "How many people are sitting on the park bench in the image?\n" + }, + { + "from": "gpt", + "value": "There are two people sitting on the park bench in the image." + }, + { + "from": "human", + "value": "What are the two people doing while sitting on the bench?" + }, + { + "from": "gpt", + "value": "The two people, both men, are sitting on the bench, talking, and laughing with each other." + }, + { + "from": "human", + "value": "What is the condition of the wooden bench they are sitting on?" + }, + { + "from": "gpt", + "value": "The wooden bench they are sitting on is old and worn." + }, + { + "from": "human", + "value": "What is unique about the appearance of one of the men sitting on the bench?" + }, + { + "from": "gpt", + "value": "One of the men sitting on the bench has red hair, which makes him stand out visually in the image." + }, + { + "from": "human", + "value": "Does the image include any other people, such as a woman or additional men?" + }, + { + "from": "gpt", + "value": "No, the image only includes two people, both of whom are men. There are no women or additional men in the image." + } + ] + }, + { + "id": "000000279071", + "image": "000000279071.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat type of fruit is shown in the image?" + }, + { + "from": "gpt", + "value": "The image shows a green apple hanging on a tree." + }, + { + "from": "human", + "value": "What is the color of the apple in the image?" + }, + { + "from": "gpt", + "value": "The color of the apple in the image is green." + }, + { + "from": "human", + "value": "How many apples are visible in the image?" + }, + { + "from": "gpt", + "value": "There is only one green apple visible in the image." + }, + { + "from": "human", + "value": "Can you see any leaves in the image? If so, what is their relation to the apple?" + }, + { + "from": "gpt", + "value": "Yes, there are leaves visible in the image. The leaves are sprouting around the green apple, growing from the same leafy branch as the fruit." + }, + { + "from": "human", + "value": "What stage of growth is the apple at?" + }, + { + "from": "gpt", + "value": "Though I can't pinpoint the exact stage of growth, the green apple in the image appears to be relatively large and well-developed, indicating that it is likely nearing maturity but not yet fully ripe. As apples mature, they typically change color from green to red, yellow, or a combination of these colors, depending on the variety. The fact that the apple is still green suggests that it is not yet fully ripe and may need more time on the tree before being ready for harvest." + } + ] + }, + { + "id": "000000503059", + "image": "000000503059.jpg", + "conversations": [ + { + "from": "human", + "value": "What type of vehicle is depicted in the image?\n" + }, + { + "from": "gpt", + "value": "The image depicts a truck, specifically an oil tanker truck, driving down a road." + }, + { + "from": "human", + "value": "What is the truck towing?" + }, + { + "from": "gpt", + "value": "The truck is towing a tanker, which is likely used for transporting oil or other liquid cargo." + }, + { + "from": "human", + "value": "What type of landscape surrounds the truck and the road?" + }, + { + "from": "gpt", + "value": "The road and the truck are surrounded by grass, and there is a hilly field in the background, suggesting a country or rural setting." 
+ }, + { + "from": "human", + "value": "What color is the truck?" + }, + { + "from": "gpt", + "value": "The truck in the image is white." + }, + { + "from": "human", + "value": "Based on the surroundings, where might this truck be heading?" + }, + { + "from": "gpt", + "value": "While it's not possible to determine the exact destination of the truck, the rural setting and grassy environment suggest that it may be heading to or from an oil refinery, a storage facility, or a distribution point within a less urbanized area. Oil tanker trucks are typically used to transport oil between various points of the supply chain, and the truck's presence in a country or rural area implies that it might be servicing locations that are not directly connected to pipelines or railways for oil transportation. Alternatively, the truck could be passing through the countryside en route to an urban area to deliver its cargo." + } + ] + }, + { + "id": "000000531657", + "image": "000000531657.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat are the colors of the fire hydrant in the image?" + }, + { + "from": "gpt", + "value": "The fire hydrant in the image is yellow and black." + }, + { + "from": "human", + "value": "Is there any surrounding snow or ice around the fire hydrant?" + }, + { + "from": "gpt", + "value": "Yes, there is snow surrounding the fire hydrant, and it also has ice around it, indicating cold weather conditions." + }, + { + "from": "human", + "value": "What is the location of the fire hydrant, and is there anything parked nearby?" + }, + { + "from": "gpt", + "value": "The fire hydrant is located by the curb, and there is a car parked close to it." + }, + { + "from": "human", + "value": "What might be some concerns related to the fire hydrant being surrounded by snow and ice?" + }, + { + "from": "gpt", + "value": "Several concerns are related to the fire hydrant being surrounded by snow and ice. These concerns primarily revolve around accessibility, functionality, and safety.\n\n1. Accessibility: A fire hydrant surrounded by snow and ice can be challenging to locate and access during an emergency. Firefighters might lose precious time digging out the hydrant, which could negatively impact their ability to control or extinguish a fire.\n\n2. Functionality: Accumulation of ice on the hydrant might cause it to freeze, hindering its normal operation. Frozen parts can prevent the hydrant from functioning correctly at a critical time, delaying water access in case of a fire.\n\n3. Safety: A fire hydrant covered in snow and ice could become a safety hazard for pedestrians, as they might not notice the hydrant while walking and accidentally slip or fall. Additionally, snow and ice buildup around the hydrant makes it more difficult for drivers to identify parking restrictions, potentially causing problems with legal enforcement and leaving the car parked too close to the hydrant during emergencies.\n\nIn conclusion, the picture of the yellow and black fire hydrant surrounded by snow and ice highlights potential issues concerning accessibility, functionality, and safety. It is essential for local authorities and residents to be vigilant in maintaining and clearing snow and ice around fire hydrants to mitigate these concerns." 
+ } + ] + }, + { + "id": "000000563299", + "image": "000000563299.jpg", + "conversations": [ + { + "from": "human", + "value": "What is the person doing in the image?\n" + }, + { + "from": "gpt", + "value": "The person in the image is skateboarding, riding on a skateboard at a skateboard park." + }, + { + "from": "human", + "value": "What kind of area is the person skateboarding in?" + }, + { + "from": "gpt", + "value": "The person is skateboarding in a skateboard park area made of cement, featuring ramps and walls covered in colorful graffiti." + }, + { + "from": "human", + "value": "Can you describe the graffiti in the background?" + }, + { + "from": "gpt", + "value": "The graffiti in the background is colorful and artistic, creating a vibrant and dynamic environment for the skateboard park area. The graffiti might consist of various patterns, abstract shapes, and text which are typical styles seen in urban art." + }, + { + "from": "human", + "value": "What kinds of surfaces and features are usually found in a skateboard park?" + }, + { + "from": "gpt", + "value": "A skateboard park typically includes a variety of surfaces and features designed to challenge and engage skaters of different skill levels. Some common elements found in a skateboard park are:\n\n1. Ramps: These can be quarter pipes, half pipes, or full pipes, which allow skaters to perform aerial tricks and transitions between different sections of the park.\n\n2. Rails and Ledges: These features provide skaters with the opportunity to perform grinds and slides, showcasing their board control and balance.\n\n3. Stairs and Gaps: Stair sets and gaps between platforms challenge skaters to perform ollies or other tricks that involve jumping and clearing a certain distance.\n\n4. Bowls and Pools: These are large, curved concrete basins that allow skaters to carve and perform flowing, surf-style moves.\n\n5. Flatground: An open, flat area where skaters can practice basic tricks such as ollies, kickflips, and manuals.\n\n6. Street-inspired features: These can include benches, curbs, and planter boxes that mimic obstacles found in urban environments, allowing skaters to hone their skills for street skating.\n\nThe combination of these various surfaces and features enables skaters to practice and progress in their skills while enjoying a diverse and engaging environment specifically designed for skateboarding." + } + ] + }, + { + "id": "000000058046", + "image": "000000058046.jpg", + "conversations": [ + { + "from": "human", + "value": "How many sheep are there in the image?\n" + }, + { + "from": "gpt", + "value": "There are five sheep in the image." + }, + { + "from": "human", + "value": "What is the appearance of the sheep's faces?" + }, + { + "from": "gpt", + "value": "The sheep have black faces." + }, + { + "from": "human", + "value": "What type of landscape is surrounding the group of sheep?" + }, + { + "from": "gpt", + "value": "The group of sheep is surrounded by a hilly landscape with mountains in the background." + }, + { + "from": "human", + "value": "Describe the formation of the sheep in the image." + }, + { + "from": "gpt", + "value": "The five sheep are standing in a row next to each other on a hillside." + }, + { + "from": "human", + "value": "How does the presence of the sheep and the hilly landscape affect the environment?" + }, + { + "from": "gpt", + "value": "The presence of the sheep and the hilly landscape create a serene and pastoral scene that signifies the coexistence of domestic animals and nature. 
The image showcases the adaptability of these sheep to their natural surroundings, having found a suitable environment that offers both food and shelter.\n\nSheep are commonly found in such landscapes to graze on the available vegetation. Their grazing habits can help to maintain and even enhance the landscape's biodiversity by controlling the growth of plants and promoting the more delicate and diverse flora. Moreover, these grazing activities may support the overall health of the ecosystem by preventing soil erosion, as the sheep's movements help to break up the soil and allow water to penetrate, reducing surface runoff.\n\nIn hilly and mountainous areas, agriculture might be challenging due to the steep slopes and harsh weather conditions. Therefore, pastoral farming, like raising sheep, becomes a viable option for farmers, as it enables food production while preserving the delicate natural balance of the environment. Sheep also provide multiple products, such as wool, meat, and milk, which contribute to the livelihood of the local communities.\n\nIn conclusion, the presence of the group of five sheep on the hilly area not only adds to the picturesque quality of the scene but also exemplifies the symbiotic relationship between domestic animals and their surrounding natural landscape, contributing to the sustainability and wellbeing of both the environment and the local population." + } + ] + }, + { + "id": "000000174930", + "image": "000000174930.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhat is happening in the image?" + }, + { + "from": "gpt", + "value": "In the image, several people are standing on a train platform at a station while a train pulls up. They are waiting to board the commuting train that has just arrived." + }, + { + "from": "human", + "value": "What is the surface of the platform near the train made of?" + }, + { + "from": "gpt", + "value": "The platform near the train is made of bricks, as the people are standing on a brick sidewalk." + }, + { + "from": "human", + "value": "What are the people doing as the train arrives?" + }, + { + "from": "gpt", + "value": "As the train arrives, the people are waiting on the platform, preparing to board it for their commute." + }, + { + "from": "human", + "value": "Does the train appear to be in motion, or is it stopped for passengers to board?" + }, + { + "from": "gpt", + "value": "The train appears to have stopped or is in the process of stopping to allow passengers to board, as the people are standing close to it and are ready to get on." + } + ] + }, + { + "id": "000000489497", + "image": "000000489497.jpg", + "conversations": [ + { + "from": "human", + "value": "What color is the refrigerator in the image?\n" + }, + { + "from": "gpt", + "value": "The color of the refrigerator in the image is white." + }, + { + "from": "human", + "value": "What is the state of the refrigerator's contents?" + }, + { + "from": "gpt", + "value": "The refrigerator is almost empty, with a small amount of food inside, indicating a lack of items stocked." + }, + { + "from": "human", + "value": "Is the refrigerator's door open or closed?" + }, + { + "from": "gpt", + "value": "The refrigerator's door is open, showing the contents inside." + }, + { + "from": "human", + "value": "Are there any other kitchen appliances visible in the image?" + }, + { + "from": "gpt", + "value": "Yes, there is an open toaster oven visible in the image, in addition to the refrigerator and food shelves." 
+ }, + { + "from": "human", + "value": "What can having an almost empty refrigerator imply?" + }, + { + "from": "gpt", + "value": "Having an almost empty refrigerator can imply several possibilities. Here are a few potential reasons:\n\n1. Financial constraints: The person owning the refrigerator might be facing financial difficulties that limit their ability to purchase sufficient groceries.\n\n2. Living alone or with few household members: A nearly empty refrigerator could indicate that the person lives alone or with few household members, reducing the need for stocking up on extensive quantities of food.\n\n3. Infrequent home cooking: The refrigerator's owner might rely heavily on eating out, ordering takeout, or opting for pre-packaged meals, resulting in a decreased need to store fresh ingredients and food items.\n\n4. Recently moved in: The person may have recently moved to a new location and has not yet had the chance to fully stock the refrigerator with food.\n\n5. Imminent travel plans: The refrigerator’s owner could be preparing to travel for an extended period, leading to a conscious decision not to purchase perishable items that may spoil in their absence.\n\nAs we only have an image of the opened refrigerator, it is not possible to definitively determine the reason behind the scarcity of food. However, these are some plausible explanations for the nearly empty state of the refrigerator." + } + ] + }, + { + "id": "000000272171", + "image": "000000272171.jpg", + "conversations": [ + { + "from": "human", + "value": "\nWhere are the people in the image?" + }, + { + "from": "gpt", + "value": "The people in the image are located on a beach, sitting in the sand close to the shore. They are near a small urban setting, possibly on the outskirts of downtown." + }, + { + "from": "human", + "value": "What are the people doing in the image?" + }, + { + "from": "gpt", + "value": "The people in the image are enjoying their time at the beach, sitting around on the sand, and hanging out with each other. They are not engaging in any specific activities and appear to be just relaxing and taking in the beach atmosphere." + }, + { + "from": "human", + "value": "Can you describe the surrounding area of the beach in the image?" + }, + { + "from": "gpt", + "value": "The beach in the image is situated near a small urban setting, likely on the outskirts of a downtown area. This suggests that the beach is located within a city or close to city life, providing a convenient spot for people to relax and unwind without having to travel far from their homes or workplaces. The presence of buildings nearby may be a sign of commercial establishments such as restaurants, cafes, or shops that could cater to the needs of beachgoers and tourists." + }, + { + "from": "human", + "value": "What kind of beach activities might people engage in, besides just sitting or hanging out?" + }, + { + "from": "gpt", + "value": "There are many activities that people could potentially engage in at a beach. Some of these activities include:\n\n1. Swimming: Beaches are popular spots for swimming, either for leisure or fitness. People often enjoy cooling off in the water, especially on hot summer days.\n\n2. Sunbathing: Many beachgoers enjoy sunbathing on the sand, either to get a tan, relax, or simply enjoy the warmth of the sun.\n\n3. Beach sports: People may participate in beach sports such as beach volleyball, soccer, frisbee, paddleball, or even beach yoga. 
These activities are enjoyable ways to stay active and socialize with others.\n\n4. Beachcombing and exploring: Walking along the shoreline and collecting seashells, rocks, or other interesting items can be a fun beach activity, as well as strolling by rock formations or tide pools to observe the marine life.\n\n5. Building sandcastles: This is a classic beach activity, ideal for families with children or those who enjoy unleashing their creativity by crafting elaborate sand structures.\n\n6. Surfing or bodyboarding: For those who enjoy water sports, beaches with suitable waves may provide opportunities for surfing or bodyboarding.\n\n7. Picnics: Beachgoers can bring a picnic basket and enjoy a meal with their friends and family right by the ocean.\n\n8. Photography: Beaches often provide stunning backdrops for photography, with their natural beauty, sunsets, sunrises, and other scenic moments.\n\nIn the image at hand, the people are not actively participating in any of these activities. They are simply sitting around, hanging out, and enjoying the beach atmosphere." + } + ] + } +] diff --git a/examples/mllm/sft_blip2.sh b/examples/mllm/sft_blip2.sh new file mode 100644 index 00000000..416bb9cd --- /dev/null +++ b/examples/mllm/sft_blip2.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage sft_mm \ + --do_train \ + --model_name_or_path /home/LAB/fengzc/LLM/checkpoints/Salesforce/blip2-opt-2.7b \ + --dataset llava_instruct_100 \ + --dataset_dir data \ + --template default \ + --finetuning_type lora \ + --lora_target q_proj,k_proj \ + --output_dir saves/blip2-opt-2.7b/lora/sft \ + --overwrite_cache \ + --overwrite_output_dir \ + --cutoff_len 1024 \ + --preprocessing_num_workers 16 \ + --per_device_train_batch_size 1 \ + --per_device_eval_batch_size 1 \ + --gradient_accumulation_steps 8 \ + --lr_scheduler_type cosine \ + --logging_steps 1 \ + --warmup_steps 20 \ + --save_steps 100 \ + --eval_steps 100 \ + --evaluation_strategy steps \ + --load_best_model_at_end \ + --learning_rate 5e-5 \ + --num_train_epochs 3.0 \ + --max_samples 3000 \ + --val_size 0.1 \ + --plot_loss \ + --quantization_bit 8 \ + --image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017 + diff --git a/examples/mllm/sft_instructblip.sh b/examples/mllm/sft_instructblip.sh new file mode 100644 index 00000000..a4330a84 --- /dev/null +++ b/examples/mllm/sft_instructblip.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage sft_mm \ + --do_train \ + --model_name_or_path /home/LAB/fengzc/LLM/checkpoints/Salesforce/instructblip-vicuna-7b \ + --dataset llava_instruct_100 \ + --dataset_dir data \ + --template default \ + --finetuning_type lora \ + --lora_target q_proj,k_proj \ + --output_dir saves/instructblip-vicuna-7b/lora/sft \ + --overwrite_cache \ + --overwrite_output_dir \ + --cutoff_len 1024 \ + --preprocessing_num_workers 16 \ + --per_device_train_batch_size 1 \ + --per_device_eval_batch_size 1 \ + --gradient_accumulation_steps 8 \ + --lr_scheduler_type cosine \ + --logging_steps 1 \ + --warmup_steps 20 \ + --save_steps 100 \ + --eval_steps 100 \ + --evaluation_strategy steps \ + --load_best_model_at_end \ + --learning_rate 5e-5 \ + --num_train_epochs 3.0 \ + --max_samples 3000 \ + --val_size 0.1 \ + --plot_loss \ + --quantization_bit 8 \ + --image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017 \ + --use_qformer + diff --git a/src/llmtuner/data/__init__.py 
b/src/llmtuner/data/__init__.py index 792e89d9..27a2f3b8 100644 --- a/src/llmtuner/data/__init__.py +++ b/src/llmtuner/data/__init__.py @@ -1,12 +1,12 @@ from .collator import PairwiseDataCollatorWithPadding -from .loader import get_dataset +from .loader import get_dataset, get_mm_dataset from .template import Template, get_template_and_fix_tokenizer, templates from .utils import Role, split_dataset - __all__ = [ "PairwiseDataCollatorWithPadding", "get_dataset", + "get_mm_dataset", "Template", "get_template_and_fix_tokenizer", "templates", diff --git a/src/llmtuner/data/loader.py b/src/llmtuner/data/loader.py index 5414150e..b7377379 100644 --- a/src/llmtuner/data/loader.py +++ b/src/llmtuner/data/loader.py @@ -13,23 +13,21 @@ from .preprocess import get_preprocess_and_print_func from .template import get_template_and_fix_tokenizer from .utils import checksum, merge_dataset - if TYPE_CHECKING: from datasets import Dataset, IterableDataset - from transformers import Seq2SeqTrainingArguments + from transformers import Seq2SeqTrainingArguments, AutoProcessor from transformers.tokenization_utils import PreTrainedTokenizer from ..hparams import DataArguments, ModelArguments from .parser import DatasetAttr - logger = get_logger(__name__) def load_single_dataset( - dataset_attr: "DatasetAttr", - model_args: "ModelArguments", - data_args: "DataArguments", + dataset_attr: "DatasetAttr", + model_args: "ModelArguments", + data_args: "DataArguments", ) -> Union["Dataset", "IterableDataset"]: logger.info("Loading dataset {}...".format(dataset_attr)) data_path, data_name, data_dir, data_files = None, None, None, None @@ -115,11 +113,11 @@ def load_single_dataset( def get_dataset( - tokenizer: "PreTrainedTokenizer", - model_args: "ModelArguments", - data_args: "DataArguments", - training_args: "Seq2SeqTrainingArguments", - stage: Literal["pt", "sft", "rm", "ppo"], + tokenizer: "PreTrainedTokenizer", + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + stage: Literal["pt", "sft", "rm", "ppo"], ) -> Union["Dataset", "IterableDataset"]: template = get_template_and_fix_tokenizer(tokenizer, data_args.template) if data_args.train_on_prompt and template.efficient_eos: @@ -177,3 +175,33 @@ def get_dataset( raise RuntimeError("Cannot find valid samples, check `data/README.md` for the data format.") return dataset + + +def get_mm_dataset( + processor: "AutoProcessor", + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + stage: Literal["pt", "sft", "rm", "ppo"], +) -> Union["Dataset", "IterableDataset"]: + tokenizer = processor.tokenizer + if data_args.tokenized_path is not None: + if has_tokenized_data(data_args.tokenized_path): + logger.warning("Loading dataset from disk will ignore other data arguments.") + dataset = load_from_disk(data_args.tokenized_path) + logger.info("Loaded tokenized dataset from {}.".format(data_args.tokenized_path)) + if data_args.streaming: + dataset = dataset.to_iterable_dataset() + return dataset + + if data_args.streaming: + raise ValueError("Turn off `streaming` when saving dataset to disk.") + + with training_args.main_process_first(desc="load dataset"): + all_datasets = [] + for dataset_attr in get_dataset_list(data_args): + local_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name) + all_datasets.append(load_dataset("json", data_files=local_path)['train']) + dataset = merge_dataset(all_datasets, data_args, training_args) + + return dataset diff --git 
a/src/llmtuner/hparams/data_args.py b/src/llmtuner/hparams/data_args.py index f5f75c77..3b52f1ea 100644 --- a/src/llmtuner/hparams/data_args.py +++ b/src/llmtuner/hparams/data_args.py @@ -88,6 +88,10 @@ class DataArguments: default=None, metadata={"help": "Path to save or load the tokenized datasets."}, ) + image_path: Optional[str] = field( + default=None, + metadata={"help": "Path to the folder containing the images."}, + ) def __post_init__(self): if self.reserved_label_len >= self.cutoff_len: diff --git a/src/llmtuner/hparams/finetuning_args.py b/src/llmtuner/hparams/finetuning_args.py index f4f71bc5..cb525699 100644 --- a/src/llmtuner/hparams/finetuning_args.py +++ b/src/llmtuner/hparams/finetuning_args.py @@ -260,7 +260,7 @@ class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments, GaloreA default=False, metadata={"help": "Whether or not to train model in purely bf16 precision (without AMP)."}, ) - stage: Literal["pt", "sft", "rm", "ppo", "dpo", "orpo"] = field( + stage: Literal["pt", "sft", "rm", "ppo", "dpo", "orpo", "sft_mm"] = field( default="sft", metadata={"help": "Which stage will be performed in training."}, ) diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py index 0e42033f..32637f59 100644 --- a/src/llmtuner/hparams/model_args.py +++ b/src/llmtuner/hparams/model_args.py @@ -165,6 +165,10 @@ class ModelArguments: default=False, metadata={"help": "For debugging purposes, print the status of the parameters in the model."}, ) + use_qformer: bool = field( + default=False, + metadata={"help": "Whether or not to use the Q-Former for multimodal LLMs."}, + ) def __post_init__(self): self.compute_dtype = None diff --git a/src/llmtuner/model/__init__.py b/src/llmtuner/model/__init__.py index 1eaf4271..cf54dafe 100644 --- a/src/llmtuner/model/__init__.py +++ b/src/llmtuner/model/__init__.py @@ -1,10 +1,11 @@ -from .loader import load_model, load_tokenizer +from .loader import load_model, load_tokenizer, load_processor, load_mm_model from .utils import find_all_linear_modules, load_valuehead_params - __all__ = [ "load_model", + "load_mm_model", "load_tokenizer", + "load_processor", "load_valuehead_params", "find_all_linear_modules", ] diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py index f73666d5..624d8a85 100644 --- a/src/llmtuner/model/adapter.py +++ b/src/llmtuner/model/adapter.py @@ -1,24 +1,25 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Union import torch from peft import LoraConfig, LoraModel, PeftModel, TaskType, get_peft_model +from transformers import AutoModelForVision2Seq from transformers.integrations import is_deepspeed_zero3_enabled from ..extras.logging import get_logger from .utils import QuantizationMethod, find_all_linear_modules, find_expanded_modules - if TYPE_CHECKING: - from transformers.modeling_utils import PreTrainedModel + from transformers.modeling_utils import PreTrainedModel, AutoModelForVision2Seq from ..hparams import FinetuningArguments, ModelArguments - logger = get_logger(__name__) def init_adapter( - model: "PreTrainedModel", model_args: "ModelArguments", finetuning_args: "FinetuningArguments", is_trainable: bool + model: "PreTrainedModel", model_args: "ModelArguments", + finetuning_args: "FinetuningArguments", + is_trainable: bool ) -> "PreTrainedModel": r""" Initializes the adapters.
@@ -43,9 +44,9 @@ def init_adapter( if finetuning_args.finetuning_type == "freeze" and is_trainable: logger.info("Fine-tuning method: Freeze") num_layers = ( - getattr(model.config, "num_hidden_layers", None) - or getattr(model.config, "num_layers", None) - or getattr(model.config, "n_layer", None) + getattr(model.config, "num_hidden_layers", None) + or getattr(model.config, "num_layers", None) + or getattr(model.config, "n_layer", None) ) if not num_layers: raise ValueError("Current model does not support freeze tuning.") @@ -135,9 +136,9 @@ def init_adapter( target_modules = find_expanded_modules(model, target_modules, finetuning_args.num_layer_trainable) if ( - finetuning_args.use_dora - and getattr(model, "quantization_method", None) is not None - and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES + finetuning_args.use_dora + and getattr(model, "quantization_method", None) is not None + and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES ): raise ValueError("DoRA is not compatible with PTQ-quantized models.") @@ -176,3 +177,94 @@ def init_adapter( logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path))) return model + + +def init_mm_adapter( + model: "AutoModelForVision2Seq", model_args: "ModelArguments", + finetuning_args: "FinetuningArguments", + is_trainable: bool +) -> "AutoModelForVision2Seq": + if finetuning_args.finetuning_type == "lora": + logger.info("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA")) + adapter_to_resume = None + + if model_args.adapter_name_or_path is not None: + is_mergeable = True + if getattr(model, "quantization_method", None): # merge lora in quantized model is unstable + assert len(model_args.adapter_name_or_path) == 1, "Quantized model only accepts a single adapter." + is_mergeable = False + + if is_deepspeed_zero3_enabled(): + assert len(model_args.adapter_name_or_path) == 1, "Cannot use multiple adapters in DeepSpeed ZeRO-3." 
+ is_mergeable = False + + if (is_trainable and not finetuning_args.create_new_adapter) or (not is_mergeable): + adapter_to_merge = model_args.adapter_name_or_path[:-1] + adapter_to_resume = model_args.adapter_name_or_path[-1] + else: + adapter_to_merge = model_args.adapter_name_or_path + + for adapter in adapter_to_merge: + model: "LoraModel" = PeftModel.from_pretrained( + model, adapter, offload_folder=model_args.offload_folder + ) + model = model.merge_and_unload() + + if len(adapter_to_merge) > 0: + logger.info("Merged {} adapter(s).".format(len(adapter_to_merge))) + + if adapter_to_resume is not None: # resume lora training + model = PeftModel.from_pretrained( + model, adapter_to_resume, is_trainable=is_trainable, offload_folder=model_args.offload_folder + ) + + if is_trainable and adapter_to_resume is None: # create new lora weights while training + if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all": + target_modules = find_all_linear_modules(model) + else: + target_modules = finetuning_args.lora_target + + if finetuning_args.use_llama_pro: + target_modules = find_expanded_modules(model, target_modules, finetuning_args.num_layer_trainable) + + if ( + finetuning_args.use_dora + and getattr(model, "quantization_method", None) is not None + and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES + ): + raise ValueError("DoRA is not compatible with PTQ-quantized models.") + + peft_kwargs = { + "r": finetuning_args.lora_rank, + "target_modules": target_modules, + "lora_alpha": finetuning_args.lora_alpha, + "lora_dropout": finetuning_args.lora_dropout, + "use_rslora": finetuning_args.use_rslora, + "modules_to_save": finetuning_args.additional_target, + } + + if model_args.use_unsloth: + from unsloth import FastLanguageModel # type: ignore + + unsloth_peft_kwargs = { + "model": model, + "max_seq_length": model_args.model_max_length, + "use_gradient_checkpointing": "unsloth", + } + model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs) + else: + lora_config = LoraConfig( + # task_type=TaskType.CAUSAL_LM, + inference_mode=False, + use_dora=finetuning_args.use_dora, + **peft_kwargs, + ) + model = get_peft_model(model, lora_config) + + if (not finetuning_args.pure_bf16) and (not finetuning_args.use_badam): + for param in filter(lambda p: p.requires_grad, model.parameters()): + param.data = param.data.to(torch.float32) + + if model_args.adapter_name_or_path is not None: + logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path))) + return model diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index 4935dd52..eeee69a6 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -1,22 +1,20 @@ from typing import TYPE_CHECKING, Any, Dict -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AutoProcessor, AutoModelForVision2Seq from trl import AutoModelForCausalLMWithValueHead from ..extras.constants import MOD_SUPPORTED_MODELS from ..extras.logging import get_logger from ..extras.misc import count_parameters, get_current_device, try_download_model_from_ms -from .adapter import init_adapter +from .adapter import init_adapter, init_mm_adapter from .patcher import patch_config, patch_model, patch_tokenizer, patch_valuehead_model from .utils import load_valuehead_params, register_autoclass - if TYPE_CHECKING: from transformers import PreTrainedModel, 
PreTrainedTokenizer from ..hparams import FinetuningArguments, ModelArguments - logger = get_logger(__name__) @@ -57,12 +55,38 @@ def load_tokenizer(model_args: "ModelArguments") -> "PreTrainedTokenizer": return tokenizer +def load_processor(model_args: "ModelArguments") -> "AutoProcessor": + r""" + Loads the processor. Must be called before load_model. + + Note: this includes an in-place modification of model_args. + """ + init_kwargs = _get_init_kwargs(model_args) + try: + processor = AutoProcessor.from_pretrained( + model_args.model_name_or_path, + use_fast=model_args.use_fast_tokenizer, + split_special_tokens=model_args.split_special_tokens, + padding_side="right", + **init_kwargs, + ) + except Exception: # try the fast one + processor = AutoProcessor.from_pretrained( + model_args.model_name_or_path, + use_fast=True, + padding_side="right", + **init_kwargs, + ) + + return processor + + def load_model( - tokenizer: "PreTrainedTokenizer", - model_args: "ModelArguments", - finetuning_args: "FinetuningArguments", - is_trainable: bool = False, - add_valuehead: bool = False, + tokenizer: "PreTrainedTokenizer", + model_args: "ModelArguments", + finetuning_args: "FinetuningArguments", + is_trainable: bool = False, + add_valuehead: bool = False, ) -> "PreTrainedModel": r""" Loads pretrained model. Must after load_tokenizer. @@ -159,3 +183,77 @@ def load_model( ) return model + + +def load_mm_model( + processor: "AutoProcessor", + model_args: "ModelArguments", + finetuning_args: "FinetuningArguments", + is_trainable: bool = False, + add_valuehead: bool = False, +) -> "AutoModelForVision2Seq": + r""" + Loads the pretrained multimodal model. Must be called after load_processor. + """ + tokenizer = processor.tokenizer + init_kwargs = _get_init_kwargs(model_args) + config = AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs) + patch_config(config, tokenizer, model_args, init_kwargs, is_trainable) + + model = None + if is_trainable and model_args.use_unsloth: + from unsloth import FastLanguageModel # type: ignore + + unsloth_kwargs = { + "model_name": model_args.model_name_or_path, + "max_seq_length": model_args.model_max_length, + "dtype": model_args.compute_dtype, + "load_in_4bit": model_args.quantization_bit == 4, + "token": model_args.hf_hub_token, + "device_map": {"": get_current_device()}, + "rope_scaling": getattr(config, "rope_scaling", None), + "fix_tokenizer": False, + "trust_remote_code": True, + } + try: + model, _ = FastLanguageModel.from_pretrained(**unsloth_kwargs) + except NotImplementedError: + logger.warning("Unsloth does not support model type {}.".format(getattr(config, "model_type", None))) + model_args.use_unsloth = False + + if model_args.adapter_name_or_path: + model_args.adapter_name_or_path = None + logger.warning("Unsloth does not support loading adapters.") + if model is None: + init_kwargs["config"] = config + init_kwargs["pretrained_model_name_or_path"] = model_args.model_name_or_path + model: "AutoModelForVision2Seq" = AutoModelForVision2Seq.from_pretrained(**init_kwargs) + patch_model(model, tokenizer, model_args, is_trainable) + register_autoclass(config, model, tokenizer) + + model = init_mm_adapter(model, model_args, finetuning_args, is_trainable) + + if not is_trainable: + model.requires_grad_(False) + model.eval() + else: + model.train() + + trainable_params, all_param = count_parameters(model) + if is_trainable: + param_stats = "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( + trainable_params, all_param, 100 * trainable_params / all_param ) + else: + param_stats
= "all params: {:d}".format(all_param) + logger.info(param_stats) + + if model_args.print_param_status: + for name, param in model.named_parameters(): + print( + "name: {}, dtype: {}, device: {}, trainable: {}".format( + name, param.dtype, param.device, param.requires_grad + ) + ) + + return model diff --git a/src/llmtuner/train/sftmm/__init__.py b/src/llmtuner/train/sftmm/__init__.py new file mode 100644 index 00000000..3eb8b2e2 --- /dev/null +++ b/src/llmtuner/train/sftmm/__init__.py @@ -0,0 +1,3 @@ +from .workflow import run_sft_mm + +__all__ = ["run_sft_mm"] diff --git a/src/llmtuner/train/sftmm/collator.py b/src/llmtuner/train/sftmm/collator.py new file mode 100644 index 00000000..e91374bc --- /dev/null +++ b/src/llmtuner/train/sftmm/collator.py @@ -0,0 +1,69 @@ +import json +import os +from dataclasses import dataclass + +import torch +from torch.utils.data import Dataset as Dataset_torch +from datasets import Dataset +from PIL import Image +from transformers import AutoProcessor + + +class ImageCaptioningDataset(Dataset_torch): + def __init__(self, dataset: Dataset, image_path: str, processor: AutoProcessor): + self.processor = processor + self.dataset = dataset + self.image_path = image_path + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + source = self.dataset[idx] + image_id = source['image'] + image = Image.open(os.path.join(self.image_path, image_id)) + convs = source['conversations'] + prompt = convs[0]['value'] + label = convs[1]['value'] + image_inputs = self.processor(image, return_tensors="pt") + image_inputs = {k: v.squeeze() for k, v in image_inputs.items()} + inputs = { + "input_ids": prompt, + "labels": label, + } + for key in image_inputs: + inputs[key] = image_inputs[key] + return inputs + + +@dataclass +class DataCollatorForVis2Seq: + processor: AutoProcessor + use_qformer: bool = False + + def __call__(self, features, return_tensors=None): + processed_batch = {} + for key in features[0].keys(): + if key == 'pixel_values': + processed_batch[key] = torch.stack([example[key] for example in features]) + elif key == 'input_ids': + text_inputs = self.processor.tokenizer( + [example[key] for example in features], padding="max_length", return_tensors="pt", + max_length=512, + ) + processed_batch["input_ids"] = text_inputs["input_ids"] + processed_batch["attention_mask"] = text_inputs["attention_mask"] + if self.use_qformer: + qformer_text_inputs = self.processor.qformer_tokenizer( + [example[key] for example in features], padding="max_length", return_tensors="pt", + max_length=512, + ) + processed_batch["qformer_input_ids"] = qformer_text_inputs["input_ids"] + processed_batch["qformer_attention_mask"] = qformer_text_inputs["attention_mask"] + elif key == 'labels': + text_inputs = self.processor.tokenizer( + [example[key] for example in features], padding="max_length", return_tensors="pt", + max_length=512, + ) + processed_batch["labels"] = text_inputs["input_ids"] + return processed_batch diff --git a/src/llmtuner/train/sftmm/metric.py b/src/llmtuner/train/sftmm/metric.py new file mode 100644 index 00000000..d1af4c17 --- /dev/null +++ b/src/llmtuner/train/sftmm/metric.py @@ -0,0 +1,61 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, Sequence, Tuple, Union + +import numpy as np + +from ...extras.constants import IGNORE_INDEX +from ...extras.packages import is_jieba_available, is_nltk_available, is_rouge_available + + +if TYPE_CHECKING: + from transformers.tokenization_utils import PreTrainedTokenizer + +if 
is_jieba_available(): + import jieba # type: ignore + +if is_nltk_available(): + from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu + +if is_rouge_available(): + from rouge_chinese import Rouge + + +@dataclass +class ComputeMetrics: + r""" + Wraps the tokenizer into metric functions, used in Seq2SeqPeftTrainer. + """ + + tokenizer: "PreTrainedTokenizer" + + def __call__(self, eval_preds: Sequence[Union[np.ndarray, Tuple[np.ndarray]]]) -> Dict[str, float]: + r""" + Uses the model predictions to compute metrics. + """ + preds, labels = eval_preds + score_dict = {"rouge-1": [], "rouge-2": [], "rouge-l": [], "bleu-4": []} + + preds = np.where(preds != IGNORE_INDEX, preds, self.tokenizer.pad_token_id) + labels = np.where(labels != IGNORE_INDEX, labels, self.tokenizer.pad_token_id) + + decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True) + decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True) + + for pred, label in zip(decoded_preds, decoded_labels): + hypothesis = list(jieba.cut(pred)) + reference = list(jieba.cut(label)) + + if len(" ".join(hypothesis).split()) == 0 or len(" ".join(reference).split()) == 0: + result = {"rouge-1": {"f": 0.0}, "rouge-2": {"f": 0.0}, "rouge-l": {"f": 0.0}} + else: + rouge = Rouge() + scores = rouge.get_scores(" ".join(hypothesis), " ".join(reference)) + result = scores[0] + + for k, v in result.items(): + score_dict[k].append(round(v["f"] * 100, 4)) + + bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3) + score_dict["bleu-4"].append(round(bleu_score * 100, 4)) + + return {k: float(np.mean(v)) for k, v in score_dict.items()} diff --git a/src/llmtuner/train/sftmm/trainer.py b/src/llmtuner/train/sftmm/trainer.py new file mode 100644 index 00000000..96b86b44 --- /dev/null +++ b/src/llmtuner/train/sftmm/trainer.py @@ -0,0 +1,137 @@ +import json +import os +from types import MethodType +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from transformers import Seq2SeqTrainer + +from ...extras.constants import IGNORE_INDEX +from ...extras.logging import get_logger +from ..utils import create_custom_optimzer, create_custom_scheduler + +if TYPE_CHECKING: + from transformers.trainer import PredictionOutput + from peft import PeftModelForCausalLM + from ...hparams import FinetuningArguments + +logger = get_logger(__name__) + + +class CustomSeq2SeqTrainer(Seq2SeqTrainer): + r""" + Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE. 
+ """ + + def __init__(self, finetuning_args: "FinetuningArguments", **kwargs) -> None: + super().__init__(**kwargs) + self.finetuning_args = finetuning_args + if finetuning_args.use_badam: + from badam import clip_grad_norm_for_sparse_tensor + + self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_for_sparse_tensor, self.accelerator) + + # def compute_loss(self, model, inputs, return_outputs=False): + # print(inputs.keys()) + # device = "cuda" + # input_ids = inputs.get("input_ids").to(device) + # pixel_values = inputs.get("pixel_values").to(device, torch.float16) + # attention_mask = inputs.get("attention_mask").to(device) + # labels = inputs.get("labels").to(device) + # + # outputs = model(input_ids=input_ids, + # pixel_values=pixel_values, + # labels=labels, + # # attention_mask=attention_mask, + # ) + # loss = outputs.loss + # print("Loss:", loss.item()) + # return (loss, outputs) if return_outputs else loss + + def create_optimizer(self) -> "torch.optim.Optimizer": + if self.optimizer is None: + self.optimizer = create_custom_optimzer(self.model, self.args, self.finetuning_args) + return super().create_optimizer() + + def create_scheduler( + self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None + ) -> "torch.optim.lr_scheduler.LRScheduler": + create_custom_scheduler(self.args, num_training_steps, optimizer) + return super().create_scheduler(num_training_steps, optimizer) + + def prediction_step( + self, + model: "torch.nn.Module", + inputs: Dict[str, Union[torch.Tensor, Any]], + prediction_loss_only: bool, + ignore_keys: Optional[List[str]] = None, + ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: + r""" + Removes the prompt part in the generated tokens. + + Subclass and override to inject custom behavior. + """ + labels = inputs["labels"].detach().clone() if "labels" in inputs else None # backup labels + if self.args.predict_with_generate: + assert self.tokenizer.padding_side == "left", "This method only accepts left-padded tensor." + prompt_len, label_len = inputs["input_ids"].size(-1), inputs["labels"].size(-1) + if prompt_len > label_len: + inputs["labels"] = self._pad_tensors_to_target_len(inputs["labels"], inputs["input_ids"]) + if label_len > prompt_len: # truncate the labels instead of padding the inputs (llama2 fp16 compatibility) + inputs["labels"] = inputs["labels"][:, :prompt_len] + + loss, generated_tokens, _ = super().prediction_step( # ignore the returned labels (may be truncated) + model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys + ) + if generated_tokens is not None and self.args.predict_with_generate: + generated_tokens[:, :prompt_len] = self.tokenizer.pad_token_id + generated_tokens = generated_tokens.contiguous() + + return loss, generated_tokens, labels + + def _pad_tensors_to_target_len(self, src_tensor: torch.Tensor, tgt_tensor: torch.Tensor) -> torch.Tensor: + r""" + Pads the tensor to the same length as the target tensor. + """ + assert self.tokenizer.pad_token_id is not None, "Pad token is required." + padded_tensor = self.tokenizer.pad_token_id * torch.ones_like(tgt_tensor) + padded_tensor[:, -src_tensor.shape[-1]:] = src_tensor # adopt left-padding + return padded_tensor.contiguous() # in contiguous memory + + def save_predictions(self, predict_results: "PredictionOutput") -> None: + r""" + Saves model predictions to `output_dir`. + + A custom behavior that not contained in Seq2SeqTrainer. 
+ """ + if not self.is_world_process_zero(): + return + + output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl") + logger.info(f"Saving prediction results to {output_prediction_file}") + + labels = np.where( + predict_results.label_ids != IGNORE_INDEX, predict_results.label_ids, self.tokenizer.pad_token_id + ) + preds = np.where( + predict_results.predictions != IGNORE_INDEX, predict_results.predictions, self.tokenizer.pad_token_id + ) + + for i in range(len(preds)): + pad_len = np.nonzero(preds[i] != self.tokenizer.pad_token_id)[0] + if len(pad_len): + preds[i] = np.concatenate( + (preds[i][pad_len[0]:], preds[i][: pad_len[0]]), axis=-1 + ) # move pad token to last + + decoded_labels = self.tokenizer.batch_decode( + labels, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True, clean_up_tokenization_spaces=True) + + with open(output_prediction_file, "w", encoding="utf-8") as writer: + res: List[str] = [] + for label, pred in zip(decoded_labels, decoded_preds): + res.append(json.dumps({"label": label, "predict": pred}, ensure_ascii=False)) + writer.write("\n".join(res)) diff --git a/src/llmtuner/train/sftmm/workflow.py b/src/llmtuner/train/sftmm/workflow.py new file mode 100644 index 00000000..9f952772 --- /dev/null +++ b/src/llmtuner/train/sftmm/workflow.py @@ -0,0 +1,105 @@ +# Inspired by: https://github.com/huggingface/transformers/blob/v4.34.1/examples/pytorch/summarization/run_summarization.py +import os +from typing import TYPE_CHECKING, List, Optional + +import torch +from PIL import Image +from torch.utils.data import Dataset +from transformers import DataCollatorForSeq2Seq, LlavaNextForConditionalGeneration, AutoModelForVision2Seq + +from ...data import split_dataset, get_mm_dataset +from ...extras.constants import IGNORE_INDEX +from ...extras.misc import get_logits_processor +from ...extras.ploting import plot_loss +from ...model import load_model, load_tokenizer, load_processor, load_mm_model +from ..utils import create_modelcard_and_push +from .metric import ComputeMetrics +from .trainer import CustomSeq2SeqTrainer +from .collator import DataCollatorForVis2Seq, ImageCaptioningDataset + +if TYPE_CHECKING: + from transformers import Seq2SeqTrainingArguments, TrainerCallback + + from ...hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments + + +def run_sft_mm( + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + callbacks: Optional[List["TrainerCallback"]] = None, +): + processor = load_processor(model_args) + tokenizer = processor.tokenizer + model = load_mm_model(processor, model_args, finetuning_args, training_args.do_train) + dataset = get_mm_dataset(processor, model_args, data_args, training_args, stage="sft") + if training_args.predict_with_generate: + tokenizer.padding_side = "left" # use left-padding in generation + if getattr(model, "is_quantized", False) and not training_args.do_train: + setattr(model, "_hf_peft_config_loaded", True) # hack here: make model compatible with prediction + splited_dataset = split_dataset(dataset, data_args, training_args) + splited_dataset['train_dataset'].set_format(type=splited_dataset['train_dataset'].format["type"], + columns=list(splited_dataset['train_dataset'].features.keys())) + 
splited_dataset['eval_dataset'].set_format(type=splited_dataset['eval_dataset'].format["type"], + columns=list(splited_dataset['eval_dataset'].features.keys())) + train_dataset = ImageCaptioningDataset(splited_dataset['train_dataset'], data_args.image_path, processor) + eval_dataset = ImageCaptioningDataset(splited_dataset['eval_dataset'], data_args.image_path, processor) + data_collator = DataCollatorForVis2Seq( + processor=processor, + use_qformer=model_args.use_qformer, + ) + + # Override the decoding parameters of Seq2SeqTrainer + training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len + training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams + + # Initialize our Trainer + trainer = CustomSeq2SeqTrainer( + model=model, + args=training_args, + finetuning_args=finetuning_args, + tokenizer=tokenizer, + data_collator=data_collator, + callbacks=callbacks, + compute_metrics=ComputeMetrics(tokenizer) if training_args.predict_with_generate else None, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + ) + + # Keyword arguments for `model.generate` + gen_kwargs = generating_args.to_dict() + gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids + gen_kwargs["pad_token_id"] = tokenizer.pad_token_id + gen_kwargs["logits_processor"] = get_logits_processor() + + # Training + if training_args.do_train: + train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) + trainer.save_model() + trainer.log_metrics("train", train_result.metrics) + trainer.save_metrics("train", train_result.metrics) + trainer.save_state() + if trainer.is_world_process_zero() and finetuning_args.plot_loss: + plot_loss(training_args.output_dir, keys=["loss", "eval_loss"]) + + # Evaluation + if training_args.do_eval: + metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs) + if training_args.predict_with_generate: # eval_loss will be wrong if predict_with_generate is enabled + metrics.pop("eval_loss", None) + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + # Predict + if training_args.do_predict: + predict_results = trainer.predict(dataset, metric_key_prefix="predict", **gen_kwargs) + if training_args.predict_with_generate: # predict_loss will be wrong if predict_with_generate is enabled + predict_results.metrics.pop("predict_loss", None) + trainer.log_metrics("predict", predict_results.metrics) + trainer.save_metrics("predict", predict_results.metrics) + trainer.save_predictions(predict_results) + + # Create model card + create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index a8a2b8e9..ac56289c 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -14,12 +14,11 @@ from .ppo import run_ppo from .pt import run_pt from .rm import run_rm from .sft import run_sft - +from .sftmm import run_sft_mm if TYPE_CHECKING: from transformers import TrainerCallback - logger = get_logger(__name__) @@ -31,6 +30,8 @@ def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["Tra run_pt(model_args, data_args, training_args, finetuning_args, callbacks) elif finetuning_args.stage == "sft": run_sft(model_args, data_args, training_args, finetuning_args, generating_args, callbacks) + elif finetuning_args.stage == "sft_mm": + run_sft_mm(model_args, data_args, training_args, finetuning_args, 
generating_args, callbacks) elif finetuning_args.stage == "rm": run_rm(model_args, data_args, training_args, finetuning_args, callbacks) elif finetuning_args.stage == "ppo": From 4f3d558f67f6de3bc1a05a2bba860c0eeeb02587 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Tue, 23 Apr 2024 18:47:03 +0800 Subject: [PATCH 02/34] add multimodal LLM BLIP-2 and InstructBLIP --- examples/mllm/sft_instructblip.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/mllm/sft_instructblip.sh b/examples/mllm/sft_instructblip.sh index a4330a84..055c639a 100644 --- a/examples/mllm/sft_instructblip.sh +++ b/examples/mllm/sft_instructblip.sh @@ -31,5 +31,4 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --plot_loss \ --quantization_bit 8 \ --image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017 \ - --use_qformer - + --use_qformer \ No newline at end of file From e1afbea68f9885ab4caebc948067c713cf612cf0 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Tue, 23 Apr 2024 19:22:42 +0800 Subject: [PATCH 03/34] add multimodal LLM BLIP-2 and InstructBLIP --- examples/mllm/sft_blip2.sh | 5 ++--- examples/mllm/sft_instructblip.sh | 4 ++-- src/llmtuner/data/loader.py | 1 - 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/examples/mllm/sft_blip2.sh b/examples/mllm/sft_blip2.sh index 416bb9cd..ac0a3f11 100644 --- a/examples/mllm/sft_blip2.sh +++ b/examples/mllm/sft_blip2.sh @@ -14,7 +14,7 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --overwrite_output_dir \ --cutoff_len 1024 \ --preprocessing_num_workers 16 \ - --per_device_train_batch_size 1 \ + --per_device_train_batch_size 4 \ --per_device_eval_batch_size 1 \ --gradient_accumulation_steps 8 \ --lr_scheduler_type cosine \ @@ -30,5 +30,4 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --val_size 0.1 \ --plot_loss \ --quantization_bit 8 \ - --image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017 - + --image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017 \ No newline at end of file diff --git a/examples/mllm/sft_instructblip.sh b/examples/mllm/sft_instructblip.sh index 055c639a..92478500 100644 --- a/examples/mllm/sft_instructblip.sh +++ b/examples/mllm/sft_instructblip.sh @@ -14,7 +14,7 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --overwrite_output_dir \ --cutoff_len 1024 \ --preprocessing_num_workers 16 \ - --per_device_train_batch_size 1 \ + --per_device_train_batch_size 4 \ --per_device_eval_batch_size 1 \ --gradient_accumulation_steps 8 \ --lr_scheduler_type cosine \ @@ -24,7 +24,7 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --eval_steps 100 \ --evaluation_strategy steps \ --load_best_model_at_end \ - --learning_rate 5e-5 \ + --learning_rate 1e-5 \ --num_train_epochs 3.0 \ --max_samples 3000 \ --val_size 0.1 \ diff --git a/src/llmtuner/data/loader.py b/src/llmtuner/data/loader.py index b7377379..b3af434b 100644 --- a/src/llmtuner/data/loader.py +++ b/src/llmtuner/data/loader.py @@ -184,7 +184,6 @@ def get_mm_dataset( training_args: "Seq2SeqTrainingArguments", stage: Literal["pt", "sft", "rm", "ppo"], ) -> Union["Dataset", "IterableDataset"]: - tokenizer = processor.tokenizer if data_args.tokenized_path is not None: if has_tokenized_data(data_args.tokenized_path): logger.warning("Loading dataset from disk will ignore other data arguments.") From cfb485eddff0130422416b50c50e171fccc8103e Mon Sep 17 00:00:00 2001 From: BUAADreamer 
<1428195643@qq.com> Date: Thu, 25 Apr 2024 00:22:43 +0800 Subject: [PATCH 04/34] add llava and instructblip --- data/dataset_info.json | 9 +- data/images/1.jpg | Bin 0 -> 12324 bytes data/images/2.jpg | Bin 0 -> 49681 bytes data/images/3.jpg | Bin 0 -> 70036 bytes data/llava_instruct_100.json | 4266 ----------------- data/llava_instruct_example.json | 65 + data/mllm_example_dataset/README.md | 25 + data/mllm_example_dataset/data/test-0.parquet | Bin 0 -> 4580 bytes .../mllm_example_dataset/data/train-0.parquet | Bin 0 -> 4580 bytes examples/mllm/sft_instructblip.sh | 16 +- examples/mllm/{sft_blip2.sh => sft_llava.sh} | 17 +- scripts/make_mllm_instruct.py | 95 + scripts/test_mllm.py | 84 + src/llmtuner/data/loader.py | 3 +- src/llmtuner/data/preprocess.py | 2 +- src/llmtuner/hparams/data_args.py | 4 - src/llmtuner/hparams/model_args.py | 4 - src/llmtuner/model/adapter.py | 22 +- src/llmtuner/model/loader.py | 3 +- src/llmtuner/train/sftmm/collator.py | 82 +- src/llmtuner/train/sftmm/trainer.py | 95 +- src/llmtuner/train/sftmm/workflow.py | 35 +- 22 files changed, 343 insertions(+), 4484 deletions(-) create mode 100644 data/images/1.jpg create mode 100644 data/images/2.jpg create mode 100644 data/images/3.jpg delete mode 100644 data/llava_instruct_100.json create mode 100644 data/llava_instruct_example.json create mode 100644 data/mllm_example_dataset/README.md create mode 100644 data/mllm_example_dataset/data/test-0.parquet create mode 100644 data/mllm_example_dataset/data/train-0.parquet rename examples/mllm/{sft_blip2.sh => sft_llava.sh} (58%) create mode 100644 scripts/make_mllm_instruct.py create mode 100644 scripts/test_mllm.py diff --git a/data/dataset_info.json b/data/dataset_info.json index bfda162f..6c7088f6 100644 --- a/data/dataset_info.json +++ b/data/dataset_info.json @@ -414,9 +414,10 @@ }, "folder": "python" }, - "llava_instruct_100": { - "file_name": "llava_instruct_100.json", - "file_sha1": "96fa18313544e22444fe20eead7754b17da452ae", - "ranking": true + "llava_instruct": { + "hf_hub_url": "HuggingFaceH4/llava-instruct-mix-vsft" + }, + "mllm_instruct_example": { + "hf_hub_url": "data/mllm_example_dataset" } } \ No newline at end of file diff --git a/data/images/1.jpg b/data/images/1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a29762eda5e2e31e178768b14b5d5265d53331e6 GIT binary patch literal 12324 zcmb7qWl$VUu)O5D^d& z5fBm*5fBjlHxm&O{96%IFjEtgGLR7v(D2YOu&{A*auQSV3G;CXF>`RT!Jwj|Vq##D zVPlhVFi=r*{GaW;7l4Bd0K>4u!QcR3abVzZVBQA-^+6{CZ(`ea}Kjt+)_*$ zE@(`kyIi$D<&UA_fG%P~v93v6Y@LXMM?rNKkKTHbQTxl133MiHwr_YUn-5M^@ zxOD02tj4}3+DIq5)v+t7wvN$*YpPqN&1%1*T18wK+6s7(b7I$xS5+jiTMK(~FGV+% zSjkc<_Nf5Tth|(z$-zPN?l=vV=R#g6J8Omkbe1JrJ;e{j9x|a7$Wwtr z1Yfjpk7YqE?a^C|1#t+m*f7aH?Z)|#{lo%VHT2!OQrD_X#kw;*A>?Zt5o&I>R^9j< zJwD$)Ause{7{+#VofK(zji_2HbHF04b48L3mqnBBZ_vo6Cgn#Y)l0{?6~?qc%eCi% zVCi$~I5OVjIv0(0*2$WME4h3tua;CPZr=sjczRLTB5gU~onb_pMJpo6?iHbUAh-#4 zx8W0}pL*;(=io@@%!;wtHct$sG#Jl)7++V~rG58=ZFE+5+oUrT=L3D)gRfQFxcvs{ z51OzO#uc(9I)ABjoooRaGEj`kOItGf%sSob7D=_$nR zH3fvk{&Z&R0u`GxdD+`j^rTaKll}++=6Sojq&X{Reg(@{gZ}mWGVsFJdet;T^c4Y9 znyyOfNm+!3XJ~cWT#XYuk@t;#kts(Wdp-MwaeA}L%xwQE6x89q4p=6T=Af2M*UAl} z^pVVL3tVt@!Bpyu^AY!h`q!WSoKhAXr%pgJOYv4Lad3(S*{JHOkc|lu|FHH;)X~!v zUMaJm{btDAeOQu2z+!UFlo@?tII-e_R}`a-dRyCSp9y}%6V@)n>2h!*^=V3GU3?^6 z+MFaSkTI@xTar*Pb0YB{cgiz^g(EH@5x4>7i7k|zFD>4pFJ`LMX6KhA-T05eA}E@& zA2>2i%6+v0QTeRK#(eS@(DAo-&a0 z{wlo>?5Fb4p#GWK6ZLpJPQ)EtoD!zYk4yy)-drwhX^6EY1~oJ_u~l|DU;$h9?M6{z z9$eAnJ@6kx+d3UD-}93;Z~3m64kr=O)0S|yq~6C=q@^@nudEbEm29$`2mC>N7WHR2 
[GIT binary patch data for data/images/1.jpg and data/images/2.jpg omitted]
z;uw$?S1Jw($P7QuwL4r3SS}sL>$pLT(`$H{tyl&O#FAL-LG4-xf&-|gV(bdMU=Nh8 z`90`~JKfAgvJaCRNW0>hys{XKLmc2|ex8-ME}9~*I1D*)mG4nc10;Sn)<@)V{6Uag z6{QhM#Io}sI2_po4^fI@;s~FaB4%kqqtT3KBsWl(Q4>hJ5Zh*vJQquABL*>N8)Sc>Pu}iHE{Ij!?iiYG*VgxQEOVoC z=ajPhRf<>;#v3_eZ!yYA%5YV`uhNbEmfVZV6h_;X&Tx04+_T#t^3O7zvfh;0?n?x8 z`9h49^))@*3`{^p%y%58cb(jdzv}8l)p1DDt0N1(B!vu z=Kv}N-2VW?DaJ?v^V%fyh>)Z?%w?PMo%TMoCAj#dA<&F$9eq8$YfwQX=gmb{Z1g<{ zqugm%MnoSfMS?&L(AA1b0U2?qM2pV%;%Wa3zT(MP}y*<<8luD{EIpQzM3B-x<%n zBHgXznp9%OMA&n#cc|rn&&y#Nx0s$`AMV?+q{Iwo{n$Li1iwuB=AsXU5|_-v3I;N#M;T*z*@=1{0qs+Bz{ z)E6-!Lh+Vd91_Huon{cdc8(7U^b=KQ2wtmR*3OU+Sq1!m6XG z&UZOD_CIRL?!4HQX5ikTLH@@qmtdnAyZLNW8-Y5=razsd#w5SXTtj%wtg* zkiO>wWA>;cr`*SIvd3`2!OtN(ezlKpEx(w+^7HoxJNK&F9dKW5_Z2s%ej*>v9(|iii1`SO%azAX^IVwe z(4;{`Yr5kjVf4rATAC>qR0EY%U^ol5eX46vtsasL9m>nc?>+pvELg-r01fbZ^{Z{K zi+WpmG9~;g%m*@+$QyJy>t1lb((d%TduxcM<@kmC!+iazovqv<3NBtoI2_p@QCCA& zuNBT1@OrKMLegx5#U<5^!I%TiLXcGL_WRRe9|_&ZZRhFQb@E3bNWlxtRP_M34y!9JtEZ>4ejeMb2u_@9fIaYFA~*+^m@Y09~rNUr?#Tqz6j7{5IA#vn%}r_EF>3x?>gZK?>qd=rd&nt_wTPAH1enr$NDIQfAK*q=-awp#ep zt3|3n-bzIQ%JU-~hU0No#PPN`E!l1%o>_^)GLejq`_+++oBsf2fZ>Ni)~q!Lwif@BAg=v)uSaQqF!oAVe5e z8Oa3oq}+HHqrqU$2BmU~60Gi*iI5@q)hDlkXD1jH|C5YVh#R+Z0 za|t;ewgH#r0Q%;-Y@P*On@6~_)u6b9{{XuW5klUdPpvT3ybj_`6vRqFEG$B_Ybx$e zPIv8C(zC4}^#X{{M!}UFGdVddH#i-7{{V$G<&|W4BVxhSfUnSfv(l!f;SF0)n7q~t z63LJm8D$gNIWmukF`+NHu+Ug3? zhH)$I7`HAmbH004rd(aSKPDJwakhSyl0zdeERW0x$>lu`aZ_s|2;7oe$t}A1QkKaq z-zgnVS3i8y86zds8HrXH&Q5ygw`wZD>o1nc<@ukMMRV7_-)hM%(8R1%sn5z+9esD9 zE(RtkndFh$PIiravKSH6@AV&ANq9tVo_iM?d1?k2`eb|m0O?6<1-QUTBl!6#z0E5)*KGZ4FMe|&s5~46>U5MM$ zIQ!F^o0O4+UBh%7_eanSp4a^)aOB}ykXY@2C165#A%-q;^ZfC|w8{;EC z;Z5h3Giws6PSGQCHUJnK?l%4C@=Vdm<%rcpgDnnnKE{;yNiG?elskNjz5wai{c3R# zG(_?lytxh>63D-or&HG+^yVKn(qs{&qyx^`8>!#zjGAX9!9{T#VbxgU2N~aTbM7fi za4u0nP{_IF$jMXaJ%06KLj!QpO%=fpAo(d_8 zvZgcU7j+|L=Qs?1T<1BalTQ*(>fzMN$87DteZS6)HNsDrSiFspFuh6dl785!=-c&urFE+SVAAl%7zejn}??cdVv|oFD($DvX1k literal 0 HcmV?d00001 diff --git a/data/images/3.jpg b/data/images/3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d334a861b22a5d72c73a2acebdfe0e4cbd041a75 GIT binary patch literal 70036 zcma&NcRbr~^gf)0*rE1J?NW-`GeHzBf}mz8YVT2sHZfYYYm}N1D`sogtW~o%wY6qZ z)GDRbKKXpVzvuV-`@Hk=dL`aT?mPE=pL4ErUFZCp`}Y+9)6>?~1^__-fFtPv__qkq z1du}@5Hbik85ubxIq8oYN=^=?rlF#urlO*O(frQ^qoJdR(bLgfyT-(H?HU&+Cnp#0 z|GobE5+DTy1uYFN8w|$A!OXzS@qcgr|9$eW6Tkok-G?ZEfeZi;0}#vr{MQTM0sufD zFc1j%|1Ka13?U<@fRgSSzyLrn7z8FG2a_fbrhtHfq>q9j*U6+9kw)a#`0RZtq;anb zYM3;-_>CV<95UbbTRnmb2+BmHXcB5!WKA6WyC+!#B2%^M)=1yS3H*Pr|2y3O8I5#P znt?P85D-Ed5ez1cM4A)uzrh%L0YLM=6Jh|U z12+0)*FtIZL~@33H8KLgbY}OcCaPWpC|s#L&TBL#cF z+&_Q-<0csc1w37~?UnJ;#m%zZh^0prz2z?<^qyx(-li0sbHD-~13_ru_dh2mYA@oe z);9>Twr9+QZ^vX#CTl8E8%63qqIfv`0>ciW?qIL<2KEMHwYgz<)c*E;+!_Z-`g`G z(&!x_cmN7Z$oVr4$EKmN-QWqx3oUZ>5dcnv#|izd9D?R^P)X)qf9!*skr^39$x685 zen2fV&ABd)@^5mS%j8P>&jj{UT`VnYtJ9bSVi>`jrE4ws`1_y4mk#mmd|mBR3i)YX zds``<;L%Y@_sisT-K1jdI8|av1<|$3ychou;F~g0rbZM0DT3t|)8qcP!?wncs*JT7 zmGV%A6N#z27Ejr4PP-;MllixqsRoB@Qf1#_yGKD+AipD1z=G!8hJ?Y71Mo&ds=X#5wr*kOj-#KV?z`Gp){nm zq6J_-!C_GP836Vd90m}DP%@ETLTC}M=$_z7PpBDjcpV5WNCZMbAjhV0mr&mM#uy

aT7FQPcPCfl*ICvLtCAgVQBsl1U$0oCq`tOSha~TT2U0O$RlUbWaw6k zekc|{G|8;_emjz}@?JDCF5uqo*+~=QirW0})OOw&?>S54_$`m%V^3zsaVUfeO@@vo zi$N5^B02VgG53!?xJ`vTkC!;f4e4S@iOOAZuiIaByCk&lSRI~BBZObKQ-&i@BP(3El#~GFggls z)?P~CJZi2PnQh8{UfVRs}}5f zru4M*shD)G_N&_?{xy63;-&C(PFW@cBl;AHZV_RT|MnG1-$F*RNl+RHLh^51<7^1g z5*+;YkrtjWE5=CBR1kQ@L<^8RMnma$Vb?}nfWSe7D30_&0Xj640Rev+LkKBnn@7&m zUIyOTn!17spCWdgy~=OusTjvL^n(Z4k%gk=cHYB;$~VWx<-$%-T;?wVmUOIWA!&@# zF>#G3B?o&2Bp=O~;48SwZ82R_1#v!ou2^zyG^$%;s8^AY0mpVjTXZltHK>+b@TDyg zv!3aY{&Tn9^1lkEu%|=udL{CfF-=-Y`=KFMn{BFf8Ko1ddGNV+iT`bEM&rDh7RlW( zF(YvQufnR^apv#mD>%2|ibHwAeO#vR4w9|}hb_+f@MmZ!&$&v z)qcaOkHcbc1@70jG0EwynmK$4LqH%DiXeGdUPlT65O{|4L!byh4A=?gWC~@?q8J2{ z9HQRyIsOU6D#`Z(sK~MNAcWXEp{@#tJ0Q{5-6iXzM~$cQ{6ry-{-!58;(% zmJ5v~kYP|?yF?4=D*pili>wIGeuRcSb*faYN!`vH{RbGhJ$@_bmXLmVWfmD38ZHQA zL&O3>B2ovC`e^3$zcVj>M;lX^_GATzOT;bqhuq>ki06v}-T2Q);V7mMI7|}8cpbn* zp9QfeV`bLlRLGZYd$jWOsYSeN#mucB^015wwm<&>^1^`#BcDNrU7yCamqDx9Co0MC z9_cU}T_eWw72yC6ZJ2J(O*YtS%~q@z-S;%D&{_j%-zG>Ld~Fy1l55QrChdbeOfvz3 zu7g&$aV!K@Dl#Y|i0Vd!22HDb&pja9R`cfbl@7YXn&yIl*<|3gQP%960l%%SPlWDE z*Z%bAql&eSVfzQbfPjGif+YxIr2SM6Ofo=?R=_+uJQ+$K3k3L}VORv2h#LeAh0tgK z@K$&j5MT;K_@zPDMbNc*up?@td#wGLg1al*_eS*In z#}Vq=s>(t1Q=OEQr%1t_=dpQ&cSsT|Qx%7XSYvXA$T?Q>nYE&&AjFa(`>8aVn!*#B z03;oqAC`6z3g7w>1)cQq_=z-vh`vZ8-^TM0r7J6ZKG3F)_>Xb-zb)R_uiB^jqoMd` z=y>k;l67`GZmag-&NdctR0~c4;8oF#*fffA5kvt(13*`fL%=G@pTH@y@ZDGhTo5e+ z+`vTxe4`+wqkx9ez^MRmcccao;m8cYBJ@BZa2EyyT?~W0D`2tyvqW8~m(-k=Rr-kGvnI$f_P^8Kq+Cwssa9czdDHGVK73a$6*wTH}jO*D8)ze$uZ{P^rdKAfc&#Q z@%|(6_5GZDtuvp0fcLkHXlGlVkPy_2BZL-!3&fDN)dvSBY^2zsUl!9XdK)H0)fM>aqi)WlHS!>omN~XTP;WHEr)^acf6Eb zmu!piurXDevlwxqyx91E)q>%SX#0SqWC&TC+sJ} zrwd34LYh-{shpjl@6|;)`ws#-*X$@osqaG7q$pC+d~=LXb;Q2d3+zQ8l7kKIC?K6% zma#vVK`s)VUWPJ^GTbWsLLoP5lwNz=lZWX3)ve)(_w{%qVeUY?raq~UwN0vFcVVo!(m4f<3b8Xv_Kdxjgm>j1C|XUVdMw2 zpRu*P0sylpG>==PBPfjwj)-DJv`y3eXZnC=C;(1_To8;=mw`qx#FzA-Ih}wa^a21x z__#tEh@1k${Z^&?&uTgZnrx!F}J%L*Fx_{@Il2*D`Zj#a_b7yWj{Zr~?o_pB|wH3mL z2n`U74~x06f+nQ~WUOktkpoxdN#~+dL-sx-RHe#&HLUJ!tV^1w7IezD!VM$xe%I1ax_&J` z=)T|@R}e);7unaT0%QaC8Yy`A)*K{ud~h;~-AyDOTRV+P>wce3!z}c__o=r>UUx%A z_{al6ffU@Gw3IbY%6=0oT&d5W$&$s_g`Qkv0MNBFE$k#OE3H;>-2{$GQf!$tJ8B

_B1QY ze4gm)$}gHrED6rKcg#7I?S6MBi?#$%M60{{MY^Ck(K+~60GB5l_ai2m&&wv|ZVdcA zH47R`We${`j$8`dpWhgNb*AdfR!KCa+$HN#hS$ES?54g4E6#T3I*{-2FSSuByuM}K zwv_35uB1lTlnrm>W&U`Yr~wXuYLF}9pAs2GFjPPU+yX%Yd3r4X(r$>7X^=uk)Lhs* z!ZuP1T9_pi0c1r|DdKgo@UR(L8AcZgJ&-zq11AFDgC>XPz&V6DJ|}!v0jB3L!}tlpPUU04G@p6O(mlIla(Xkx*aT zF_|yvfN}FK9ii63f4w#$pg8S8$+5{$2qjQu97>O&gHchm(6|r9d$RiOY1w;IZf@;f z2r1aq*G}%y98=HpW-V;J?_|p>O(8gA0fsTTEh!NhwZ+cO2j^BFwn5Y)i$_l_R3akP za_a#$r<@R0dBbOqR}_3Ad{i+DJ1NTy2X>(^plW-o54Wa2W_ZspBxbz&Ef=5r;!~yd zLT2OTvqxg>%jfiD+}W$rVT%b#rrFshELNvM_os-*otLAKh{rC!zTQ0dtX@{MJZm@* z!<6YK218z8*u4~WY#h>!JPSWJ-+H|1+=1u|y0%p{td;F-OuUqIXnE#$-Sbn&Qu6=`*&$FW| zUmA>LzND#dyVeI-lek(Rw8#pQX|fD)fz(nbiEntdtf%HCguB6XVyx10MGp)``t!^h zW0chn2a(o&jO04X2Wieb>6Qz)qOaA~nZH6WC2X8Z4q;kv#ZK9#j*XD>04(xWEqDSX zQh+8C;irP&uFyO#fa*>xg5*9;?9jZ9_hB+103J0DBAW3&B=`@) zg~McM?qDdH{I#a)w~|H$gV^}Uqkp~kZ z-lPa%_=IWaiNkAR{0h;9tfT1

b$M+_Cpi6*l7 zU>dt+xyo3xx5Tf5&kxo&O8Qo2@6jbPd<9vr*S_9y^t^fc==J#w1(Y{qUyN0*M0_MM z>Ko5lv6?W~r2PO1$qdWi^YN@wmcA^FNxU(};DO82w?6RvWFwb5cc~$|WU@{h(f8q7 z@9G(~vT4Ry+S2hBu&+z74Lj#}OeL|P{CC9qOPldme-Q=N8Qgl92c;y3s@zYjzMX8j znJ+F(WJ)Y%rx&k!1&*{2)Md*uhE*%StBo2p5_W|&o21?W0^`$jY8U;v#hsml=R{d_ z^T{OT{TtU*K6|+r=g(nwruie={4Fg35U6tg%WhALfO_+(LP&EWmL8C*3K|`et_mO@ zcL{Z^H+}j;DEJOy{imqMEB%#t(P25-0hO0u)CHcWeIEantJR!$Vp}j(4uzVQ`GU3}e|cc+7f(7-ZQXdSLHmpmp-=r;U)u z5{L3Ke`}{Bk9}aj@XZ>x@`(tGx)*|zl6^AEyTod!I)QpWhS@Ugv)}1XPheU=)%l$g z^VkojFA6sbHS;l4dK7u(W%y@K${-85v_Nl!KIrvub9+Q3CWL0PJr# zjE}^qfq>w7Dwini68=sYoDT-WZecgD@p#yC1}!)P_iHEK96jf+IPP&JC$cC9Z;WYY zFo=&~O%T@GT>uLq5?{SRg0P&#veAK<_%ucl_#aVn+Pd1b`-T#3H(o{)WAk*v0xh-P zaHTQ!3m}5J6T~DV6f+SE(rDB_z;=YB&MA^&j~E%g!J@ouWM-1brjoA3rE;fwmmp-S zEskc6kr}(%@#gf6o}%~I+&R7T=pSH{Gp5t_ldVd2afv=Sjbyhlh$tT#dZX&#lCQ0G z!6#SkYwqgAv2{qh>ay@M4+})&@Pj5uCKUcM%>?=)${2BuFxY0oKh=Q%pzKs)7DSk6 z-n9zDRCx-jg$9{Kxy|-FrggbD5A8E(X zKiFT8YU*259 zCQZqRu@F_)@Xf+9yzG4~n8=LTj|rBJ2TJ zL^2G^$(IFAfuo=<_P{g{=)cWFFj1s{NZc`$k^Fy%G&nym0b)c6P)UaJYQa%l?J#v2 z>OlZ3=vkK0Mcr?>Rwhyut;gU{3Pf3VE0}88Fv1ie&Rg-XlnMCo>&%RNObk$=@GsBN zT3o9DYe7)Z?(>h396}K=;WI(>S^Jr#R+ixj@&*|~L`;O3DIx$v9p1y6Bu$Xrl8k^4 zlBmcKkqY)&KDxgV#Hx8qZCO*zNrY0vyT6i6aqLgR8RkD+$X$}{e_S7Iv2;w9gvmh4 z0eS%ZOE!c@MAlJv>-6XkCSu8zEe)TLk~FKX>+EXNJF90~}KLka+o5hN4> zfd>zf<7amLxacL~rn-MUGya`*Ht6HaxyS1K$$8$YqYw*z+544@dG_p?mZ#ndzeJ}G z?}*=+xRL`8u4r3k(W(vi)rIDmYt20M=Gm!3Tv2gJLw(i+=b(va-M$CMckJpxJxoox zqS2}~)U4B+P7yypN&CDO09F@3RXcW(gP>2@TK#~#rV;A%2mW_;v!cAJ69`u~Xq{7i!@cw$aAxy|`gAJz^FOq%(D-->=S0 za+jX1|DzLHMu*QTQ&Uf@VT9AW85PfoV;W#rbCLYWw?jehqeqPXm~GoVxfCB+t5vgg zRWsh(frqTe82qmO6WFhC;{N-Zz&oeb=$-Vw`k(CG4td{QYA5iYT~x+orBA;wtr$(! z6jT8`Id9QEeM?zZX!I_|0xe^LcuM3!lYyeh83JIp1ZX^4z$v811elaXAK?C@d!Q^Z zGScybQ<1dH6h_tkjPrcDkKe zYk-tI`CLbt;hYeQNhpn=Ur3nalY@wsS7Rwu^ViNWHjL7>73^H9%rTM8<+024kQzZbr|7v^#f~?(D#(e7zErNgaLvO#ej(NP7QkLvkVe9 zX3e{-?$n<#^<^?NSh82+P#r~cr6h`vGFINhA}%iAF05VP>j3N}X$^2#LW3oKHGu8t zk$FYj)X5k0hh3?v8%Hx-Pq$$$Z4z;J>bbwZaYtSiUeclK-1@`ui&JzoggJMpVE-Fs zy<H^ZnNdNk2M7^9LfV6I0>~kvmU9%`DQH`!(se0M!`YIJ_dHCul)=P z-$wXq6>eNxmE;#rnG$nV#C6pvL>meF_(?1AxmsCHp6A((O+eg`Y>%lO1m()D@AnOK zZjM*R?zX>wb!uE*+KlDiaCTzGS*!~{RSKzOnPNu(2UIknSqX66FDa2!g<1&4*kY__ zEnmt3?7~%%Po>N znldFGsT#fazXLHbGMU;R3xfU5uAG@yq$DaNZ~)jfC4AH zhPghYi~8qX5StcCX+g*`Br#U>jMB>pmc)S3FSw>P4=9+^CP5_^b1hcUz<=2>HO2DVBWvsc5t@fW*>K?fN6Fe28s( z_-yI<-3Yqn`>}(ZqVblcf*3;`^&@wOKlTAV|C={;{oNQ%W^Lj%j2VE&xQWS$jz_a7j+ikth)TCrU<_tr8GRokt}hoEvQ5d@rkn;qeILP`f2NeG0T zD$=S7_v+66^pBTKuL=D>@*TYHevNKz=GA9M~^9o%lry z{|`WYM;;8<%7?RNEB%P9dFTJ;=4$dBB*}1= zSINAudXA8gFT^BZp=6fMehJe zf%{X5aa3TbmE9zl?idMgWt$253mkLniIKN1Or4=0PM2>3VuYUwT-Wts@5f;H-}1%u zy{Cgm)(p|PKX5Wau0~L@pIUW|31@$=w?qoCeDD{WNeDUiUC0#%-Sd96)zZh&7Zc3s zB-(F!=Xcgqed?b4rrq~y1MkQ(9wOkui2f8ZD6b5akv;&sNz%<2NJym-G0u(%he2t_ zZyu9$9lQ%r1VRapL5XmN8_|L{B1G5_+9W*&L%t0GzCpl2-~hm40DvU9;rC*O)a72I zwf3XP6xL92HXnBJ4N-?t8l2@qd`y%*(CafsR_%V-P%nJuJmj{BAe8T`-LV;&o|B%? 
z6^ZNct*v2htJ2ogiNb6m;`=iMU{KIXLP{>K=uWU6Qp5LOu; z53g=&iR1C!zln-=_#=^@vOco2xAS!Skas4Sw`FcRQAJ&GcC$n8gAQRgU4ip&hvtl1o7Xq}0=mMxjO6{U)_#F~ z%Iz%u(Fq~&_ja(h=8Yfm-uFiV2ehM`ev3tp#;(4Ma1Rr=v0@!uZr@OEWw}N{mz8#4 zL+kQw0Y5Y8ky6d)U^DzcDI&!`?_JD$t7PG4rVwuZNiEN@00zzhat(^UV;%5X%*� zgT5Cc<>iYqUXJN9tq)r!a~!YjW?9_-S-F$!)VxKUS+-&amkdaunkD7836Z*AvLRsw zWU_3W>)`*iUDHvzHVhkw< z3P}kAltW|)60?_hm^6~N3b}_5SPPpr9Yq`D9F}9fh}lLWL9p0F2BXLK(a2Jfo88cB zLdM`7zV=0j^91Kl6CNLh?p8}Cq88^Gw zh@e#*BCMYrTU>K)(V!~-lunGz@gb2pC#Ljw)Q>`KNvG=Qe`MG*>!dCjx=h=>aFxf4 zK`d`%K;$?^q7I4Vj%aD+>(2?j$9s6$lR?5$Djj*FsRv&Tau4e}ToIf4SBTo9jCssRGTESU#N3OHEs4wE) zJzcq(0~( zUl?O){cRe%C@2Yt*ef*eID*6} z(t=2!B2fooq=4X~)8QZ^Eqh2B#YdnB6B8{sRfL8_-h#leSQu|PU5|9qhyWc7HHX+q z?Il$ErZYPPotmcPlnYHd9K>*UF!9wT%wXvH6_5NA zQXH7!3nmmk9WXngY~6MCR9~G*c1|eJ8%VZh0)|?rZikLBz&k77eO=a z7?GA50Aw@UcT7NSSg9yH3%CahF%Xc*343j0rl?Syoj0}7?z;vA3xY7$MM(AqFRvbW zt9)0klfpyXuuY<3*%@LBnm6Y`Uj_CLVt&n@$@%?{g7!Y`b> zwd_)W8?3CTpKMF4o(auQDhfDI-9^yBXve!&oN+IVOBMp=`_1$0fYa$yY<-w#9+;xD zLCEdndb@m+mX(~dT(6ne%>;T@G@M>tW}+dW)zfPim(C;D@5-h!{r+>It9R9cHEvmr zyP3zfg2k*c`yfX7@@H5&e{wWk(c?wWqU>20ubnSGbIH05Hy#{Qzejq2m!bhce6 zw84{Z(E2k!iF@AlBL+e;zw9c9H`{|fTvP5(K3=_c z{6OPL*zmp}nh8`8J}yHELfRms%yWO=CAsj450w2~zMH3K0UrXwF0Tjk7WPl1RA?D# zfVAL1#04D2mIlE~W7BAO${~?FU+?Zjs=x@{U?=T~NJ zCgf?0`F(X*Gr~r{hsN)WJ~;DkH0y01skqlF@rqxata>}kylD6GXSCmNhH5qY8Xsk3 zb9#1p&5FxaWuZk^dU@MV|GL}@3B`vyVg(#Cax%}`lKsD>kJgVB27WPd@9M{-2Ml^H zTHO1AlUA(q%vBVBW>F>}8Qgqo;X1bTJw02i5uhsU*T643va-F9*Myd8GZfh99k3Cy zyc_3K{F~>qXd5dG+?KT6a2bEr_xH=($1CY?fBE?BbFb{xjg%c^bJM3!`x52VN})YX zyzIccLA9>_7MQx|krz)yrZ{q|f~;AcUe#QK@S4U}#!fGD-SHhknH^d$_^GI!!e@8$ zmlFBc;6oC5ybIsx_eDF|X(i2@;`T!nRU4Fager1?Sb3Azzd}2*6HgZ1uZQFZi*OO5 zK;WMUxH<;T$(JvKh=r-1Bj67LNP4vhHv`}&BKdX;DGf(-Y!{M>0~2FlC;vfXe}oh1 zf8FpXN;t&EXC9yX)ZCBiuhq8frK zY@xLgA~$nD5yD~kM^CA!mbZxd%gpu^Yk0f$3BDVlb!S#|*-00Kn`_cPanNNNegG!!|p5OkB8pj=ndwvjD(7YSQpl_M+$EE5VM#o7>j#o1@m= zLn|t!-FFK!qcup536ZRU?_;nH-4_6teC`Xr_vgPj^WZKi&LCDvu=+m>LWvZLf*t2b zCL~HKi;$s60Rkl>m8LX5zjb+h)+k#sF}Bavt@}t&?4A!i z32knBv#rf0i2IjUnDos>f8)|J^!c8ov9 zxu+MjnL#dh&|+|x?dDnYzL-gCp}W&#y#)7`_(F{Y4yMla5L(!`7r8@AJBRr+f2&P* zExjEB-KH0=92&B6fBLDrEm|ft6ny+7S<~j!;rIUApwy8-VCI9YPpT0UQ1S|i1H&KY zZ7pRdS#i8oDQ77CGxxd6hRsVSQLOihZ|m+ih2n36&kW?xY=^?LWN|a6S+qW!_0#Sh z9zMFm=@$kA>4wf7pIkEw9m2H|5*kL96)rHASC;uDgeE)X>*g$%D^2!jsGa=GSMWc; z%sSQ6Ds}y;wln)O^5)Osr7bOXnZ@CH(Zl!t0buUjKMkkzCHFsMC z077bJ#B;*Izqi6QJ~XDb$h_U(%zHzmRen$!SZXaz{l+Xmc!h$}J?qlI$KMcLy1lsb z%q*-moPS}g&R0L@$-bzk__>>Og*CK4#^>fgfOp2~+nHXsjz!fRbV+D}?ySMU*8=Ur zm!u*JnQ+s;Uxc2S$uKD!m7l$S(>`9W^aU$WlQH|#!dti3eo5Rr`25acwDfA`?~2<^ z)6VzLo;5r+%$j}TNtZUSQT3SqAE0KBVk2c$O|E5d+#=|fl{=zp!7A3)3?okT|K52X zZhB>L#255s*d;vIH86|zZ(Ws5>$3$n<{NV1H+;k{KlD1+=$<*xJ)#sk`k}I1W|R64 z@cY`&@-xYU_PjIMHi=S$qqh(i^&`@()dHa=Kix(ZCT-5>H9ULr+#gV=qCFe z%n!F1c@}l0FwnG@JHvu=SfI7UZSqZ`Axec4sXI%LoZD)S=r^|)$C<1)BL6cqc+~SNf%7XM5K1%oc~`5c-2nv z>8Ed%<$q^I9~k)-mQbPM*N$e^mOK4ic54*WGi=ZKT+C+!sS+iJ(w%zc?OKET6qn>} zN*%+q%!wSo`+oTZQ>dG+ZEh?dUUFG{lA88zMb2?cz3!^-5kD|IcYJ7AD%;aC^u2$~ z;Zuc4py*gJT65=pL(eBRoAsUb*IVN#8KXh)1`ZygL0@|Vufqi=L1#)JFG-&h-olEo zpuyW{MN+*FAAw}3GUQz#lner^#YuV>PYB$4oQzZ?&5c&U!x$sqbqexkQ#djRLj?_e zidi$01R*;c*L5OHwKAp75Gyb<4X7fhZ<~jI5aKu_YG=?sS+h^8Wb@YYtG1&*R*f-t_FoLr%}|P}pt* znc}hC;+DC!(azgdsrZZP?E5EU9`akWb&rwFvWU_|B)mz;*C$f)E$g|2PbYVr!T3_0=s{yc}7$#oLyG_Ely%S%Pw(yB2+Sb z*B$E-!zCUrry~sa1yJ^z?tLBh<`r5uDh+ztuupE<-pI<7V?Ta3=pTTR+Tz$^N3#2Y z$H;BO)XmV)`HZ>7Vo}rTfpDr9F>-}Bn)k(hKe27K_#9ienT1HEetJUTpnTtb4qf2) zWzh$)-t;jfL^z|6*WfteiX#od^G7@6)Vh!RbE$TB+;=k~+vklz+zH8dOo!LHrnx_s z^Q<7634ujoJpKXVF34tiZdNEd@!xrHg_or=cw-=Fo{@k7)m5 
zRq=-0xqgMW0N5-w`Vr_!44uVr#K->E$~M#uW=+%T45dp{pZD?tc($?v1{K;= zP@zq4&~A(0s^^k(;;%qly(Q^<*`3-tn=3A?tSoLb->@8fOPmEv)RhQ+^%E{SPOtQB z;4xXuTmFsk+-YnJ*H_EV8j4tW_2TshHR0d$$F?16H=kPH-l&_(Y~81PPgr>UA*rU# zm2o}PxLsI2|2dxCy`4+ryHEe&H$~|`txDmA`yZTgy&l}(4Pv&Ot{Qx4iz`kO^5 z=7nlDHVQ{a-Xq@os`5W3?ul&r*i-+_X$E><~GrKI6tqgdT}Om+uDK;q1XE z0Zoz^M(4tXhuuK&YJtGdNOc?}P7v`Gt_3HR>5!_^qcl+h3w(Q{Txf0sOO%eNr4Vm{SMK5bhbXyUp?mieV}$PEBQFSI zUeJq@8ISbzm-6Cyv4pzFmPvu-ah)9aOZH_5bQd4l0->#u#Jw+vJI@lJ>UI&0KaMZ~ zxjB30F2A)V1HQX9xxTQ*s4#si${iyFO}Q&;&Q*_}1tuK#&`^r)4()JL0Yrsys2*>VtX>A9zJdzH`k5r?N%zn&yH z2Mj%!Q}P$ReY3G>icNu3p|R<9nWfL}Q);mmbvAA>CLQhR`a9mAHad~ zcluP^=)Gg7-|pp;HyU0uVYd{LdWRY>+KqST9$eOj@dgo`&h2gya#&7qTsql%D{M%Rl4-_vwR5@hq0Yn!@ zawZ^E1gTmYkARO(82&L{dDG+G+Whz*U~tuRjf%~R?+&wS1|NTWr@e7vE&N*CwY_p@I#8UfY1)$8cz$(i zl#bO_tb8@+uxl^6jtdbl}$Xw`GE-p%EVC0jvrj1%~JfLz_Qde?LIzHx)+`=U#0 z2p#*W#nery5~+oey1DD^>vCO(=~GDyPCMlddmpRQ?S`rC^xHw}hB3o0{o`Kx?&?~M z9%PQ79y2Wrp+*P5K&SW9XRdqire&>WQ*Y5YD8U4J1Icc8FdE*vGroOaZX*5Q7kA)& zU)D$=E;*HVbqQG#OFUHtR*MgITN0E!-92|yw~A;|1pOPnu)f;ZIPgDNejJ|j2(O(0 z92+5Z96XJIpeYf0U?_w_fFcV#2pnYMqi+E+G1OjnVj71sk~%iH@J8``nHL0B0$>k! ziNE6rf$Ojxlaw|n%`pPP$ceW;Lkyq;|7I7S*!_9M;7e7G8FfO2%ms+v)Q{2%(7Kjx z=Ucss((6O1vC%55QvZIuf-zc@vjY|!On7`&|2+(c)Uck!t-4H&cS zWR^&F5uuX;W$>q1eZ1w?RY~nisnd_0X+|>XuTK;lnTJJo;%?6;yf2(f z5Yl3`dJ$%=Yo>7gKzun!N%w}E-jSh)S@_x8B1_3f2RjK0U-&PoLyx~Fl{QQ`xL8SN z2e~6IApRXQD(~K~ZxtH&8xg0f(W4gL788fSAd;K8P?f)QeD-J@*C01GD7VjSgK^gp z_OH!$lL;)R-w)MJdh$ScII22F{#^0@Spj54T;*Y-9Q@s>y(UrGf_1qK>S#MC4 zM=MpOryJSMwO>s?HRolepQScF*n2H})J)WMmv-(X)rNhqToI>qZuI2>~ z_pzwMUY)PJg<@9j{_WSXZ<92FUyN7hb zRk%%oTSB-s-c?1q&4gY*5K=6Ut>Dn-xt%1#Mku(w+)r;xD;s!vHvRtXGvAN*tc-{j z7%x%Pl3ZzFM(Uy`LTVBugql$$bCo}FqrH*Cw1J1(-HvUd`Czd%=ShC~H^0XHHpe8} zsU0dAj!wJVk8m@tSzR+_wRLXBWqB$-x%kSxkg8IrgbX(cTg{yBapqRC*hCJza(m(J zi_9IDpEi=iS4}f_)ojSq4T_zm8X_~<#y|Vgv1$gn^4!h))5P9zy8AUlny^HyXlWsb zQcSo=Ir|+VUHE77KQc}~tNniO)vhpQp}Xrs1NPdnzR--?a>0R=w4?H=si{e0@HE?5 z>ymq(mLHAS{ol^F?&-&wjBRwwGjf*kU)tbHoIlfQ2 zN8;j-SI+Lcy}nm!P_bWLkqz1a1_bNfg~#H89G)GPHMv;72uItA^{}Q7C@>WAT?x( zAec0^(x}L`i7pUwG?XL`uq}hpTz_V`gWs5VkG#PkA|mcB>)ncXax(T>X4ZQtWU953 zgHIjZ78T~i<1YCK@<=s{c?jLMke~2wL{|0_e4ONCw54O0(L;R&Q_<3E>VZYNFLq;c zaCtiT+!g}dfC*?dv!w%m>GQgv*`Ng&a!SPJ8RTs_YRvWm2Gn$B(>~BxSraW zAN0kjn(No`kF}SZSKO^L*LPhD^x{Ayt^2^5H*NUq#>JnJ5#}uI2`ekPw|75dF^MeF z-;b^-H_96q4!1ri$-T3{`e;Wc*rlVY=jsNI*EGXoXUCL&@0X)zdO+{l$EHxtINdy3 zU5Ai6H$qKE_iePtNIHkvDv$BZY~-^4#!qfkowvxOJ6LnRTatz$Np@^F)Nc9lA20u% zHp#%~l1;a=Hq+U%u3?UKZpfY87N4=D&5>vBfi_`7n{f*E1%r|s@F9#e|vd) zv)t;)pF9@nCjc$bH^yg@`R0Z^&8(5Um>LVCRO{T3ERN zsw|2UCGla)ReM{{7xf<1E&hJ;+-8~%>A==7VO1BU9P?{z-jhxKQ*2!a3tzp`O!1vb zr{(uo4Xu$cIs0YHwOpjr2buQ`&!5|l`^`S}2sg1hpUb|j_pBFv6Wdf%9)E{iJV3nbgTax+tihDkOZ^}@4A8* zr_sSAv0wtlAQP#Fd5D4`jU0W8WQo7w*8v*f6jE#51*s(lMk3*|KzcwXJ`fg4JE4t+ zM*TlDon=^5?Hlcf9(qualtvnnZU&^JYiJNeB!=$p7&-(&au{kDN;(Ark&-T@lrBLU zDLwoBpL6)+C$3Abz4voJ_gd?>pvvHw8)gTT9?4)YG>a7b2F-**vQTPMs#?DSe29f; zM4AF{Od4E8s8_{PJZPvCvBpFSMQY(-Si10({iCTmbGgO6AAT81nD_`7$0-Ps)Okp* zP=>TLG1#u3wMP;PXXNZ0Npl$067iRqffOX|H5aR@@EP=*7n32L+JypE>kC7pjWdM< zxlf1OR~-dsB?xx|``N5^(>lv`%!17G2BQ{L>c!iahtl`LI_wfs`+3XacujY2LN%z` z4l#x$D&gz$AVM?GLZ?RWL99=E=?#v2p4pkS|3Ix`qWMrS7r1B-wW!y^yPfjC+p7oB zbUyco$6&K(_4skRexwQmtgkr|v$MH9^qQpu6yK7~`)J&?6@E#@ERc^fyzQS{a-DBt zgp;^v&2aaoXD$wk8^wt2k74L=RtK@yCKZ3r{8*k4UmUgCN$Ue)9^ip_1Nl4{r*QW! 
zQjck_1%e9;Yx9nRS3|A>v(Adj2PcU{*fztr(GV=$NG^4#P&kbID_7YV$TMpBKFZ>F zmyAlNH%xz3QrXEx{^p%y@i9a>w?6zB5=e{-fAIbD?V*0c#L7G67c60cfIG(wa?S`-; zS0$y`Yjr?MbU%JsB>lq3-nc=my3+r0xCQ-#m7X_bF6G;MmtOhhNB=;J6(Q2Ep2V!( zg?kHlhhUE%=&Mjo^+ZZ<>!pMjRho1Un9DYL{>4cvCsN!kmEhI3rz7jxQNhf+9m1_fqvbe1OX8X&$R~&!Iby%8Ac-u z256hQQU(=r-?d9#&*?Ao=^OTt^0njb<`zDMBQMK@r|e`}#vQpf#%*2p6n2fPkDJ2T z=ERNlYTDa+)*d>Wuv*e;&FCfnY4vDLjun?5eziFNw#wv+Ox71Hd9YXc#ADz>pEs*J zz{NN!QnO~y@~Iv7j0+u=mF?wQIb^Nh(bICc>zi|Z0)&SWS%B$qrHoLJ=a0{e`?FV+ zP2-0f#ViAw7>}Uf***=0Q_GiOOfOjT0q_IhJWNmqLDT?`I^ZF*qr`mjb?9+}|IAr_rryNiI}j^O%1fdsy%5k7^|lgV^N{0J}T2^XVaxE5Poij%yU zJSsv8A{7)KxiM^K@QP(yXHHLymB1B^K8kGRDxFgrAL4oT*MTF23QwDcC%QM2y2$5s zL{0__QYq9l_Js10C%AMIg6NB6x5IJ5Jwmg}B_v=XRZkUuhxVTDYyK*L=O*DTP?IyK z4IajMX21^Uu8`{;aJFB>d zCYIz6$NQ;&fFr}rTLQV8y>eY9)^FjkW*Uha7id&b$Uo3O(8%~*bB*e&19j~RevZQD zZW*0P>I*C1KNtk%pPJzlu4sL^Tn~EcqFn)RCQjRt5SeMQEy^Lmuljjs);hRBH^{M) zzK4%aV!(Un{iOz0M}WySWw&MkYN%JE0e0Hc!g>p zND=3lO1>|2w1~5!qWrcjbHnpbex#HjK^O2 zxVsi2-b+vtty5ao8IvkMk5ydR)r&P#lyDGpJdm7{YADaf>6!RgtE8>m*LQ@&_3*ut z;Pcmr4A16Lmzw;^xmQm&9G5b9p0?T2S@>$@I`|}qG%fz@?^D4Zs2Q4e{JF9~R(+;R zyJr71A=jxR&?uNP0GK1xmHx-41k z2gCgRb6@7j8fK4}w~#vvELB`SH}1l0{8*lG_mCNlBUYU=)_k^-e)i!LqekPQnr8wV z28;6VE*Q+jPQissz8i4{rG=&|a-*>3u&$t?h|>Rn4h;xGD0vS%{tzy1dAKg0A{QQx z9+n{oHcwO`B{G^+*9V#oy-*U2h>FG^LLz^O>0P6^SnYHAShbe%cxtzHQRNdpCgGG1 zolykuIrL1gxtMo=jT&wwreYvsj)K4k+B?{*WOU!b!#Xw#`v=OF=7LULC>ZP{N|?RL z)1*?m1eXRd1Hy$mh*>8`f#Ngc>s*FxiyFBBj@xpgiv88+hd)ev)5J8gUeMj<#zl7)iHgl6{R%BsKWn+U&Gm3z zq$Kf){5?m!w~Jjeh+I)IyVu^UZpG}ZB{_0v7)de?=-2KDb<(`r>Fx|_FT5-TFh6YA z7j~jaNp8L!9nEggwQ}?B=Mg5ZYNeL*<-3?k1qpuPbM+R{X5N;TkD5)9^e+Z~QEL~5 z?l3yunjWm4GJ|2a;b|$>hw=*;Ute6`?(cu|RXk;NFF>^& zWv+DG@*33E4W~Dr#IIE7iK})r#zCl%UzfiPbiYS8Vg}s}DLmAa^(E89|5U}iX{OWK z@iT0mCi`Yt&DK2qG)6rr3NZmI;Bey?@F+ClIiz=@`od|aT-ybQVyQQX+_W^S6o~f^ z^DigaqsD5~&dl$9&tUWPZcaL${kPADN|QU({pP%Dj~c%PA!W4Vapv6CVggf}@5yya z`aRsaTR}sdzv?;uSj{$dukPLS(|eU$h<&6zInHmoPVO_cZ`5uXwY2?4=*`%W(fBIa zyeVW1TOz9d>#0W869KV#4HHB{ruAw@jJ$uZS-TqWi*k| zA_wPi!0%dB=J_JfbydgfqzVeJ129k%M5iFdv1PCx|(xccT zxo~w64gee}e1r=e31nTkirBGPP~iw5y9J2+4~rT)aFLw(KqenWI%fc;!`7u$OX;y= z%42k(-~jcfKan*=ogJYoXNE>9H4%c5&hNiN3|AC1%3)O90X%3gJtOg72pw#{K@li8 z8shg(Pxp<^ik|mISma27M9eGh02zO?=ME#pgwKe;NTrZT=syq!2(o#vxLO&0a~og3wq zu`tIhbBpKit<955K0+Fg_v*;i*Rk{4ehD>NT<&!0V5UDzS1ov4(d-0AS2r`-{t`H? 
zS=nw4FnHS8<(2vgZ-(__eJw{Rt?7*D(5+%mNO_`(!PUV4PuO|@DHC8C`_|ABl85Bi zS<7K*;?^%^`(yUeVs#^%>Am^LyCzJEI5u03VB6uerJ!3i*9Q;LBbp`5SdC`>m#o(} zfh&i8v!Zm|^_knxKEIH++uyUz)(ci!eQNu~DHe!?D6CO^zEze#zG<7ln&`Ea*U{0s zl5-0_>72b^7^|DH+0As$BH+M2H^fpw=_2H~^r1YUh*M~o0$z(B>`-8LWvMfAJ!(K* zn<0OOmRxeQ+Pc6M!Fe|4uUGP^hVMPpxi$#2G6imvAH81M%@ z&soFEL-0CxO*AtO9Qx2T^DbF*%+0XGeg1m=_+9;k%Hm`B@4gk^Z{uD%F6Q{GaIKfq zWA$IX+=R8ea7^E8wBE!->(}VC`}apxgt68?a}BVWKCHRFjp4EF3Ob&&kJf?J&b4Yh z{&l)!iiYpQUh40y{(6WrL>jpentveu;!>;JbdG8-yQ4%+M!=V#KiA^2?N1m>22CvWge8a*i@kP)4ffJD$&&IU0+$&XwBJ*n^ym`&5Wi+{mvk^h|TjXz4Sjj z5W0M(6R*1t%(tc`0q+&a_k=Q1Rx7-s_Ow2zYl6_on^~dlM6A<(}aZf+fVAu?pXzr}aa~(7G>a}P3)%%{2IvMjp~luIi-`P0If8_glo~Dvc*H0`bge zN}TiL6vI_&12wt|GS^g8<+F4aUg61t+)m!!2}P;ZPYOCUKEC)A6H+U&#Dq^b6RqE5kqimSFT* z-*t&xq{P%uZ}X(5F5b@6cQJAClQ|-s09NY4?PT<;JZL`|PNJC;t;HYa*pxuIIR{64 z!|AA6(yLg!7pr2IOcp+ROYp0??65lapaYKJSms!w#bwIyt#E>gev}~t9m#Bha;41@ zyBEIeq0LU>dnrcesO#%0m$X<0M(Hbvg(pR5VwwXymWLdgFc0qmmqQ=bmZFXwQ z9v>Sj|Nh*@`E5vZN^2e{^-{8}xMTDe9XGJ(MtuUpF;E+9n*dC!tPEtYAR$wV zSXdp{5D_AXpd<=NZgAl#5<C0D({Bxh^DvhryQcgO@Jflc>iq@=R1D z1sM5@Y|ZZa1B^ll;Y^D&rfelf#IqP=^8mG%WB8&BvdYe=#sg5G*r`6k5>7s&YOMQM z1)$eH3fK_kIA(2#IYLjXdk@Cb5w2d(EkZi?NQIJ!jf;YIPz0EW)x-d^Pg*1?D-k*J zKJ8NbK;ze_fYS`+k?6x8bL&@8;8^jktoc_Ph1%=o`7^mGX_DL?&xS2q-YeF;sGibG z{$2_;VDK{=okcp?qQ z=ND`LyyFTll$bxQNf!JxNti*6k-11Xd@@|;RVk=1%3$@EK_g}B&H7#nEYaij{_BqS z3>IHLRa=;6ySN;#!5J6Gs6{j(jYcYsYBEL^Vcl1q+w0w_xbsh$#FxkMd_BdQ$Nh*E z*iTI+GHegK+}i`+G-{wtKkv8~rgddkj@+co#0Gz;dNzy6)mi1JKwJI^=dMDf2R&WCd=hwn(G;7t8!-SABS0jEm| zNKSaqiSQ#R6~I#3eO6M175Ynwy`~xTcUs>a*ALuRr2~C3-AA@wKC34slQDnAVn;iF zu0s;_wt-hD=bd{#!$=9;{z4~mXLb}HswVThxqI4i_s-iU*&|&?jxT_JW3Z{C5MD4{ z*J--w3+rx%@9UNyJs?Ird`Rp^Dk{nLvF2A@^P^l|QM#LtA6d;9ZWws!1EKVmzVI6;IeePo~6Y~7OFA8vh1_?+4T#b6P zi&nz$DuU=1YSF2Oq#@@PI}mei5(Q=^Ng{353xoA%0PeaxSI|#J z$^0`Pj}kI!67LczbLXB*lH3-$o=?nJROtEHM%erB#k%m=the!eB%2C~@(( zaH4g6mGCJPna`n9s_cr<2J%YS*iZCGkPtR5LR|vDHb6ySNCc69#zS#+2@F9g4lzVH z)@Z>5!Eilnd}a(2J8>=+^TEjInIi>@9yTs3{!v2DX-t;6hBK0Z;1TS(Gb?@c7T(hl z1@jM7hLU{=%@Gs?NNZ~{#KV%GkuA6V`v88kMVIu`NMuLc&t{02ou0KsQEo&4(iBX= zFo^?ZzHd>OxfT62c=-?HxI2wm<>fW0^O=lI@B49+Fl*0q6PN=xnQs7LgBXf^Q>Cc- z^Cf&4M&7H`8x4wO3DJ3tz4CXuR!-Si(JIR|u$+pCMSYRL7p-zAFcvXbqviChy<<0! 
z7X85HUJBRf{2Fj-zUavEd3*TT6uCMtK&J?(G|#G>l^_*bNMSaX$*P!Un6W4QUKga= zI}k!fd~2occ>Dg-11K$r+uOG-E=ht%9j<&e-@e4r6xszZxf=WY%Gq~%+TKy|A}*hw zJq30Zdj%7)?Rs>`^3&z>9mqJeh)LROyIbU|)k0=LJ(om}11W$Q^Z$#(3yhtCz}V^H z_S;GxasR)uQ!Q1*1z63I&N`&*{>L8hfjR>#IS~MJf*6|;P;RM$n_Dj?6RZOl!ikiV z`h&|9U#Rr2`j=EqO|qKRdLNqo$s#gUyv%~WygS?4&x2(fZI=nF#vvq>cqxkK?=yYP zn_8YEA2xDXxv6g##r(Wx+V1rD!JS>?!dUdpYS~x*dC$zPu!kL5=zNUA|FL=v_3<%X z#MruR{ReM5zvXM&#^aq%)$$ylUb`T+3k69by(TyJ(!93y=woZ zip3z;DwUM?FD)v)${O;*;I1Ry2W>O%YZXQ%;Q?2E^fA(@Q-2>1SS<%xgn%oGmqZUE zet@hj8U9H8Mjujf~ScRoWe&ZK~0R@K~4xkNxrY>CQ#t-GKtrsT;=!zalw4qH+B zh42X&N@VVSf$_PUDAi6#T^rTYJ>e@_d7-8oSu(i;#o%`@)%pl=!N76Z3q->Jb~4Sm zGSh1?YY~cob_6$k1Sj(hI)#g?Hw=oX)P0-<_wnK3oHd-R`KGGH@345r%zIVLwt^ z!JwEKQ_%^oHyKv8SV^Y{#9wK>P$1AAVis3$1)uAN<3Mzcjvt|P1%ump(qG#tQ$$0g z0|0ulBr}YDO(v6Z>zA;iU5`D_zK`jx#!X!GKhWvkAM=Z>C7P{*z_pyWE`5(m@dh8Ca-d!cHr)zKh zeX0w1zkk5TW_1MKjU&9G#sm7+#)^D=*ZW(G?c5+o&>YoDAfyhCd;e0-W)(_H4sIH^L$i5lfE6K}4~5=ky7P$VZ@O5e>j zT1~}SmP#6#waYe5`U$^`N_&KuSuw7Ak!nlQ^!DMftG!tgFR9i!TylX@`Avo`NBN_2 ztyY|-Twxrn77W7~1kCR?0PkQl73owc*(9nfKm6lvxGeLw^99OajV!1jheK=N^i$8_ zo5B&*wpKFw$c>4gwdim}oA;bJU*tv`rTf!YxT^dlV@Pnt}t;zt7?<$%{LMvfaFp;Xc+#bcV9j?eK z`m6z~1kC&^BNBvcbK&6v5$?ysdVJ8~q*;%!vao&SGr%f3N<@$s^;~SmE6;?76)p&D zYLrv$MQ-)`@OUUcll(!vz2I#TkovuOk71kKp%CF_S(EkZ^hY071{^b$L7%*q^8C6t zISxJgs9-&B;r&nQzheXY+ILdUQo5(s{S`M7%Qfm;->b)SvWHL`rVkvL#jU%#xU?Z* zb($2j9)v_|VV?GTQ;!#>{6R{b%p14<Vk6mMARsB<~`TI?kGAZRp3Qt za7rDiea~oJD~9e``s~bN_05+8t0!ubO7`Ct$XG4kot`SRms!r{Ppd{bs0mwozI>;O zwXCf;{fuWRyKVf%sQHy%d2M%5&<`5OAfe-SwI#Z))QT=x(<>^Y&dc0DeSy}nP2a17 z@~wTKsJFn3F;i3Nb`z6NrHA&VGTGOC2Kgq;@t>1v_oej`%fqbh$VDv%Tf~J-Leld4 z%wKy=SRkx%Dm#x(yQg3p9A0x8(b}NF-@8UCwU<4I!maQA=$W{9uy=q`EFlJFJ89Qd zq=E&oPeZIK1C9*94CAAs+gjPT=T0F!W??o+oHa8c^n!g@lD*`&O-gx^Ysnk2GpHhd zA|VLS-)@jftOzJMic3iX+Kxi>5&TEemnp{LqPbxOMUXIiwPxUA6Ksd;@@J7U>xRdG z!n2+$v&rKULKME^Kub~a&_GsPf&mZ*l>;O2CZ-0;rK1?g=#R{}jEz@1y7dH@CZ{y~Oy@lyee%|E=^4@)#02X6 zdA4Ob@u0)#Rl+2Gs0OVlO-h+sev^QUGZCdkNqBJ9RC5ZY378b;XW_$Pk=E63WNH|= zWqicj$=B7(+06#Zqbnpf4*Q#DS~Yf2%Wq1CQ^Sca>%2IcYWCtu@EFgTQjWRKx84)t@|P>5!56X$c-Y8~@q4rQ(Tlez*(;D7Fw)xH4_{1g{0lv#~j1RH||-GZ$9=|Uh} zFeN;QA|6=Gj0}BCJl__S8r#JSWaJY&sf+byzgy-jOs>7FUVh2sQeG6q$vIS6+*oED z+2};l;wh7m``9wu$HT4fmu;WQniAR%hR$c?TZ1hL6#3sI4@L(~Mu{aq>g=Ouv({dR z0sk%#9F+K+oa-l-P*zFzyfm=5sk5c|gE)6FOdXo!YB((@z#Ykaqn_(nht{w|lCf-~ zXDwwqDhU{Elnvh$h)bmDY~d+6y1D)XIS3b|4k%BTmf zH53S8(Ja>ty=yCQx?d1&Q5d#emda3@JF0KnW?~6CHJQ=+n_cbaMmd~K9y>zz zE@p;zW~0bQ$_BCHPJkGZK)QT!YRtPtTWB`+Cg)8V zEDEb0r~DDe(xLZDTQZ6RFgTi&QXLB1g(6O28n35GkX8{`nFJ!(;f4=%C810r`r8QW zN~F4j&(56c_Bl3K8dWTknn3*d_(!jK1@V~- zC4=gxkOgVDm)4_#^8JO!=;n?NTgUIZza_ZeK7G0t+kp=vmbk9jdOa|=GWYt}i@4c6 zJ=z6N)#E)}MLkzQN9eM$>QQrxNRQyU`(sJI1D|U8zrzmwFDCh2)Dp(sf?oauRT^+^ zHQeO;A5f>h<)2^iw(bQ#{O`NmRv-L#Z)7~QFs0}yc*Z05Lh3~NsN42}CeV0{9@s1j zMPQx4^pi6QK~!L5aJs4pHg+J=0T*=3OoT_oZ0IJKe^aQVQ2(Dd%kt)M9Y+CrfE0a`7Iht1*xy!z%G4a&~fU4h$|h$tuY8>PU6a zDCisfqbm23>b0ea&xQ5Q=n9)N?=!J^%L5uV1W25>v{!7}YX3}dU58D=@(JH!iB3c3 z>=6!bg6$6;yLZJ;R#sAqv=_fO<@^BC{&^~3?AqWs;F}(iJEkYeQk$yVnp!ctds@%| zZ|9edgb?f}uw}D@7}c0W$N)?{u;&FAg92IKQQ)%XKpOynGV^~>H`j>!G+?C*LSS?4 zheHryv#88qT|5XEE-{$A0ZX@xl37gYKf)B$%aG}{ffBwuHXaxhK0@{>D|`DgIE$){ zMuDOygXCS*Uk%Z+rl|*#nb4W6=UfRt=2)wIhj{5x)u7MJHT}}Wut1(txvo|~`lOYWk`qRHLk*|g@&b+wqOv~{}Gcx4ydLbmo>zDg;xBQ&SR z6WyxQrqM?SNg}-#r``i`&d&w>SK5|lYxM)XM`t*i+V-g*55MJYN-^3aRcUlE?;N5M zek0@7=G(sH6!J5dFQ7{IaU1t0d15g<>hb0ej`^p`yn9`0nE=s(OulsxxKKX(e@ofc zF{+C0O8Kte9oKa;CpQUGlYP+=`K&p8pnPVFETDn}B0|i?^q=wK21&4j1;%DZ$LvRW ze5a0uU#?qANcM@54iW)i#{M76YGnLZV8DqQ#`HjDf$js=@9GJ_rkls 
z8>k5KQbTwQa@0^1u1?L7kcO(=Ezi61HqS~M2Pmv;JO;T+Ro=KCxjNjcw^1$)u>8n5 zYP`|%d&)S&_?`Xj;dK!*a8ZlHu6THzW+s_g$Yc9g=t1 ztO@?LP$iVuYmjZ_cU(sk-cL2OCW|dt_>ORS6&euh6Rk27QzGcHDn z&mZH^<7N5(12wg`9^w%tv(shbrYALj4z2nz_2ZGu^6fj5dB>adV4?c={V(qptXzwB zsC!CoVt%-Woi5_7hcrSdiM>g2z%-e93r7*o0Q}4jcsv1JQ4f4n4G2v!*oLAYOvDj7 z8k3950^P#m33u6!;37jrTI1rgfPhbZ_?fY_eryCKLG4Iv1R#1S0AX{8FBvUx_e@h> zQAD-89yWf?-KoQoP;V!~qZGA|!T$$Bfo_nACG&m?<`L%#rmK2JfcGYJmwW_K^9dO~ z%;ZfAdLW1@{c_I4t{$W5`Q3>{+<_?3`gltL=V3HOG%*JM2_z7}GsH%etjJ&0l1~+@ zZ{^K9|KJ1FwzbcjNLZRGFQGB}BF7m(mQ#!SH^?;ENTL6W(RV8Hho83E4Ok!c(mRLS zCywDE-IMs$VqXH>)qRc3db?M1s7XRsqCvO;TbZFB%h`HzR(z&o^XMuMIhnV0Z2;_x z#Om!2l56Gh7yReLBdnnfwm4op@_i@oER)TEHEB~d3Z`7MqtwM zms{8V7?~_qTE|Aet5|oK<*cT?{a6rxjo2)b&OB1G?%|GP=;URvoCST>0!hXoc?b#y zuZSLG8}@x)Z)@OTYnDFxnsk2qMLD+(4eO&Zw*GUim_moLXEE6Llw>;W-)!RnbkSS7 z!v@{gLWwJzKxspsOZhB%EhaBxuY0EHQTv#HE2o6T=~#`l7N-gvjP-%wBCBt!rQorw zY_;omUhWprImn_bMN9wKBblJZi%NLYZO+8QJ?V9cxbHzy=G%j0D2YUI4da=O0C-r1 zWp-;vShwtSK+MLzzt6VZ)lA3u-uLYGy@KHYCX+OF5@u|cXx&%;HDoE~kr$4jFQDn9 zP%mY6Xdd|u7dZ+8;fCsDgs%83>C>QLKiN5AG7Cq3*1#dr7Y7C_`163|o<=A;La zQ09vtwmw)>we+{G1ph#4dZXf%i*vFbQ*gR3?~|&8d8;d5Dc?UzX5F!|gW>m=I+y1z zE?U3ksQTYqcyV#Qu2ET;R>eRfa9ph;KP zg3sisRU9jzTuLK1g2Dhr@gL|oD@6zRIXu%c`HfNi&Gf&|?R`B&sH7NE3}seF<04-{ z9z-Bbq0DK2Bk_Sdj4p^FME4mHkt;kwDNbhi*PD*MB&SOi$AIX|8FIOnrk$TK4})lk{`%nKEljXf#F?+ici z)ykr?iTL*2G`+m=s-7es&f@dFVOvySbk%G$oTDrI#V~jIyRv2@8uyrw2M6}b8Y%TzYH<%Mo>B@K=`;~BfJcO-Y}>pvl=!?84_+t^M@HI z7|{jVXh?N&NtM{~Nl?+i-yFOdL5UAxMS%Noc*0#1B#}0dkTgPMBo@>)s=jiw;cm^7 z!x89o#Kls!nu$sGVSlh4nt&wWB2Z){A$+ zlSqq#k>F^BdHQxMkE0vSW^ zXb-W&K)?PPTmq?9K-g^^6OB8>g#AxODk0a66mjBX*NxyoQUduwd~AXSEZkxoY+XED z+q+3Y4qT^M7cpvSoC;XAV7sNNfUAZ7$x%%hMWyb?eonhf|F&{ry=$wIA_0p|j94v> z>4$%yE0Layn2Dk}ul}XIWU+h^G4YkqkaP1#OBvsiLMRG8xHes$c!&^g*G?t>ait#G zPH1m2q-8?~*6P+Oh7FyBRX zekLjSP~lO&J)yive{9k&kk@HR*f~q@X|m|jSW-4^(;q@gy9<} zW{loR?AIGQcG3^!>DgM*EzH*`2xDt zP~J}^-OH$thgT?x`!Cl`df1|mUw0GqdfQ1+S-!Z_`U*_*p4;p1$7E&24g4FHUARA{ zIJgTD{rPU|(NQ<=ajJRpUdM3W9Iz9BD}YB<35CcH!y2haJ%D z(CP%4OqKE6?!rxL3Tt|oK*ej+3vRqciW%j|#=Z<8mXZJCD9 ziSnVR6imuE2N&l`3b*`9PvYee{5W7%T>(?5(q9CKkcijq*QmuErz_mF{gcp9Dvrz# zoq#C=0%}l;ZotW{=N34%n;AQD`1FMa zbxSF&%)86+l#;DB93X?nI9VXxcSb z*TClXHT_jPWU}XfuI1=#_dJ{T@ymYtw}+VS+xQ0NVlW<7VsG~UUxBTi3v=KV7=;ki z__{MGU*Fu&yVeYFirC|^aaro|eO23T67^a9Uht6;#xZUm-?&3mLc8GNu_kks2@@AH ztmT&O|ExVELLE}%z}bI5G%^eCY2=w>$OIte^xU0BZ@zl|W_O3J5|AWkf$9 zXaF(0qahSy|J@2FAA%_|frtGm1{Wgd5S5GMzyTviuz0BD5KQa@q6F4w9qHE{No%_C zY0*&oooFKsB@sS7G4(RF&nTMHkzxr?1)K>kW)*fDT$R6SRVphi3It#ng>(qS=#4cq z;;KHAI!Q6XhJq*tn2A>-#_;-ayouP=!NYwxL_}_N+@_0CjqT*%{UZgp)RdifD7$Ru zrMJaZ7AE_T%tjfNov2-cQr~`9JAG;YHbt9X<{|%9>)d_*oOzIq%J%#F0+l$Ned>X3 z-tRK4MTr!+cd+ke$sIo*4zM_ige8)N-l99KMh+ny`VHom^*z0`*qekn85{|Pm-*#W zv2GH?c{0baZ)I22YZ<>XzxCJoeIkxeqcoNT@M{H~K-Am7MHS1Z8g5xKho6m_5v^s) zOzC-nU5e z*R4Pu9>_U-Gb-^i<{yYfcVY`jSN&hZT<-oqP>8|s;88dHf+4gR*M(g-5Sio!dw~Ox zaR9WU4GusIw?a^hkO`sQ=7ZSt@qAi5JM!SQ@Z4B@8o6%x-ub<}Ti)UIcxO#lYb#Uh zKhUhBI4>Z6&mB`M>CUV;8D(kX@XmP1g8X?$CuF2-4$lWN`z`uNdQ+6Bu5TGH_h;LY zWJ+H>^=NBBEs|bDC3<0SIi5G^7*76SX9-U(w_5iAR8vX*gCBE4<8XGT~{53JaZwp1^5`w~&h|tm4tg6GLGBY`ic3 zEm>Qe-&NMj+X;D_;h@r);de%@j2NULaG6RI!BT9ibAX0o0nmVZYMJ~0x!^G9`(~XJ z;07=;YqLv^N{Iuy#d*-tAb@!82W8G?C*i6X!%0GdBZON;G|X;uzZCPiwO_q7sMx!G z^SgCN>Tk!X)pF<$pk_68D~LUKp!#9QGNLs)_)!eCVOwQkSHjM4%KFIFguPkYoU|ld zZyP@2=4bf$PhQORk+ZaC{qMdbX){4O&`mABkbeqrNC2+&>0P$!*TE3DlK}&?pc{cU zs)y%Afsz>2^BkJRoWzo!B7X+=@j21gI4uYq6)AjW$hVAnw*hynScYMZGy8na)G)1{ zrkfv)@JgsYeC9e53i@`763UtBlabuUH$?4gdVj(zz0-Xo6m^W_pRzUhdUA;J1fLL} zkl`-P3>Swqei_Bp)h@Evbz<#|>Izy+g)`2FySxC`3S8vvj`k#^Yk#9R 
z#tO>|t?96D^=0CS)mQ+_1Ef5Q08yAI##eoyUK^z5*R>j=cr9y8^5rPFNA-Lhb85R3 z@es7B=;iFQw^OaXVBgiE7$3X9_Rnr>HXK*8`m! zsNQeAMb@r4DWbSg>je=8<5Lv~wc^)wp43-ex+F zC~g%bN0R5M!!s~t7>a16Es1#^zOcPGYhZshS1NMOFmisLFMAzEF`6fhHDAIW@56w6)4{Jbl87ktNb$d-i zla|%K{rWz!nZ2Z-$twqQ?|d+GIu6Ow!CFzxZ%ZmTkY@A8f{m< zUCF1;x&Ei4KCQ5Hz?DR$2q;3JaD{=O=g(~P|3j9`0e5I1_XMxHKfC$w6{?%{xB>C( zrs?8SQ2-6ttVMGQ4;!$F2p;=xM?#VQP^dzxpCO1l>UYp;;=;6tpIzs^oc-)g%2#Ro zjseniS>KnY*NrZsm3R?`l7EAuAJ5Y&mDb{sKX$;wFH$&N?M zMl82K=r))fb#SEROtcON*INBR6{#1_quD>aCCR&nbaA{|`xL;THwh zefyzPQo38Z8-YPu8iocz7`hwj?hqJa=%Kqi6p$9_?rxAydG5Tw`!}4=K4+i3*ZMBL zm=6G76y)I538d{Y7%mwiu^sGW6886i6wXc9A06g{)^iq_(&!v zVNK@&y);!8fls9%v9uF#Es8IH|L%ok8I2IxzcyPHH+@gKw08)}!SHzo93_*6fxrSq z)Zo$J=>H$JLRfnExj^y*t`3^gIzk5=6~zE&(g*WIGa(#>$gGWhkWM|pd0*UxK#WV~pmnW9uv zkQ~c$>zO+>_ZvgYSrfP8iA#HifFpZug5#QYSDl7nH(N3Ijjx7|seY=-a1~r~>Xk&z zgFuR-VGtCg*wUZ6vgNBBM;Z8|TKgh;`wH9oqPX%V!P;bd$KGxr{!d_Bt4NcvRbi!W zrOBX{|Grmdlba2fAR39w3}gPr+vn=RKL__o_m+ZDHl8j!iUHYJlbIDUrdfh2KOt>ar7vo7sV#?!|jM$GEJI_9GGk=ucO*|FQ{d24QK zUI;&Vb{)4}9RnmW)B5PKBGF&^8K_T|w=$fBkm*{5z0j|~h@K@3-;pLjIt~@VTi7@e zvMcm+0t2#%1?4j)!=G46PYP!n0u-)9P{V#7demEK&axu>y16o#8gZ;ba*;NtxY4ng zw^$bWdo4AgscQIpkG~Pey%~C!4zbWZxhO*s+7bX?dNVh>GudNv>1k%C=i#nUSv?rd z<@#V{GG98PLeDMxX3Tfaewl;ne1#)5ANY$^DD8GCDTwuc!h)-&*NYsMe$Fg zat@T7J6J;xQwHq4l#m-26?t^shQ2sjKr-C>pS@eME^&AdJFgF?cLU&A@VJ$377u|( zBW2!7e?QJn)zPqg&XXEH`TOs4oRoL#e*pHr+}N9s*JszU{BQ6;H&Z#pG1L%iC>jbP z{Ck9~p@~e3iI!)B{s9^7e~IkD@ZA`kUoMM+m>&v{V}uXBXcd0LNiv8b{Rn6lda&^u zyA}08jXX0pc2PQ_op^Wub3CBmCyp6V^6R$pjz zXE!3TVCF&nz~*3i-M_c2xP6m4FJ6=>&qN9=*!$-+-u?Cv>RCZeK4(gsBx35M#!-LY z#M!wbbd82#>z#72`XvB#I}o8OhH%tiHV_san!gx;%O~izEH-2sHe13n>z|&eCv|&-kZv5~3G;e3{LPUMI;}@)+(^_ z8TosnV)%0c@|ExuGASrdC5GNRjdIe~^ITY_-gAH5lsLsmK6yg9Hpw&W!k6>v3Yu1)sJC>|3L8Zsmly9b7YM)b89zybrxK*opPINLz zjn6@HGfgc@1(&1x9FJ@2rxPZX|AIi6`=>`z<+zWxX34eGyNEWX-y zxm|)6H>%G|Qq{nMtJ}9xK0T7+fGK)BR1G-t2?`G<*hY+ggA>^L0HHuc3b@*o7ac>I z0L}wT!*mOlMK75^NSH>&qrwAJ)qkKqh8K6zJOGNzT95=~Z>+{GQ`8T%t3s0k%Xlv% zqn=dJ6%hpf&sq3pf`@|&CQ}N*XK40^fSlGou)9Wp5GACOVnhGBO3C8+N_Thw4Nh-$ zq5(crmI;Mc6-jrBX_Y9Ypb&^$VbkjzZtwxVahW~W%h*{9+JLO9q?GUG2zSMltIvh6 zSbQ(#KLi}60Q4I!hWi?q#~KFW4tEpI1Gv$l5o}c13XU(-+Uwi72>+WT%$2)AsJhvd zv9^2*EoD1**cua8=#GPK%o1z$9(yeennX(MY(RBHCy(^L*gu`Vq8nZ;)a@x!~CNj z3cxZguUxxlNVr}#8j7TuC;Q@HlI6MuH+MnMa_z6gKPM}x!MmNEj;1;>1o;RsTcOd~DL(D7)zOg9|}`^Q6B;e{${) zLheW>D|UxBG!44ikT&wAJJm9z1||bb1|&6wf1|x~Js$NOE*w8--@gkKJu$3V^i1j$ z@qb{Sj*^Ri*x%WIu4C4Q?=e{thKH0O$HPfvCz652%ljA8{R~*8In#7NW@_a=8#4wh zLupbd#+)^+Oa`7d@{W)aawRS<$fh$2qZn;MpOiofq#+ANHV*v(f(Uhq=zX0!t75k}AkNi)e^B}zlrDV9{4<$VS5%;>;SIO(>7Sm$50RH|A|0MPJ(Hh%%Uo6KN{@Hp zQ8n>5_@n-Qf~IiuJM+ilu$?2AAkoWEcM6vD>%?B{@T;Sh$R6;pxggt*jPJ5g6Jas^Ek)mXGBaL+p8evnsc= zeK{K6jm~m<^Lpf@SS|I@8x{Nq^pDFpHvFi7c>NjnSzf%(pfj&d4_08#{CZF{U(D@TNk{I%=Lcj|s8$WX#Jc;ULnu zAeV)f=1+x}tFzF<(RAo{Or1E2eAw^MDdHVC({$(vhdQRfCFw>LW#B`>ESo;#YHah> zrEsCCsc8oGkEpJ&6Uv-G;f~iv)^%+Uq-{w*?)>o?a8e!beP>X`)#1+DN~fg-ct3zl zaaKi^OQ^Ih=inFY~o+O0(oIM>6j0k zNYr2y=a_H+=}B}=KmJ$R$6y^x0gKA9OnC9Cm~rxjA>=n=t?)i*f=rC3TMJI^R0KQn zr#QDG#!mRtwN%?Fp8wS7mRS2uG7axG{zIdt${cxG-tTH#@_5@0-b(g zD44dk6%QP{g7!?xaTNnShbt-k1)r#d2!W>aDJoVvL(W@sY$=ice9yv*MI z)4j)rZIhiN&msE58nSZc<+qjQ`;`e^w*k392rHg#w;SzNZJUJKUud1#S(!cSsn`0v zja8vi6DS)kp?&o^;;OS2dbK{Ou1@XeXs_dk!_%!c0s7bHl96u!9JgyJ2X5i1Z99`r zxa1c7c(()QhaBw=PO|N7j6tZ^NIo8rZ}^(V#O2n^=jDG^FWFc?SX5Z2hzx?>n-B72 z_b#a?RX2SVjk)PD3*t~>N<1gI= zD297Rvv)}plOvqJKimY)yqKSR^$e=S66;?+S$vcm&z9&}FN5%>#q)=uh2Mf^a%RsS7gr`kl z3u4kb!gI9npcPafqkrxi<|dnfi<3CNu^u#T(72>}%(#xEwsz@p9h0P`JwmEMpzq1pX zE_l?CudUm8>zp0EOZbGGmtb=x@{CgL+F_#3>Xy1X)Mb7i`urcDr)kRYl%2&#b@$5U 
zYBJnWB|vZf_$~h?doo}6mv7U_V_ZxI!RH(|C5ch-x-!PRJTF1RLo=NYcKk_K@qPCe z&38;d3s#~El!>y43VeJ0mO^`5!p2V72(ElUZ8)3DOsVlMz_P;sow!Y0Wyu{m_PIpp z&Hcj{&RSwmeddU-XctnyB>if__FhK0JRZHy__!_^g+8NeZ89v3!6}fs+xC&~f1rcJ#qvaGx|fjtxAc z0|gMRhz!pSmNNkq5K5j z^6V2n2#aJ(5%hD@SH&w-D7xH6JgVYPAZfQhxnjE)wyr8veBOHF%v|j_HBVNm~Df z&3Zf>L&~+!iK{?o?Xk&*!>yD9paGkuhTb?bH>LBeggUnK#pb*juc4g?6WtP_nHFnL zdt0YIo}?|e^}dfw~FDJhd>i)0Bs^C z$NN1+_bsavL5aBmatrB+Kbsq<#u*A-uDW*iXIv9Mg+*tGeI8BOO;OKcFaD*X-Mb$QwQ06GVwS(s-e{-i!Oo1hK_m2mlUz|!K zD~eCh{*B#7^ob(o%C`?cTIt1Zr#(=4vmJ#7n%dLSN3c27nHJehmtq7Mh6~mpxhX4( zj^{h%rp%s_>$X0K#h6Fwlu{_=Bi^v=_2dMgF>C)l^euK@fn!Qd+>UFDV9T>J$2s1Y z^gZs!w7fs)dyIorQ{GP7Bswk!{&-SpYs|5~(Z3zktOvbuT4!`^i~EyI4a8BhMDMz_ zgg#$IRB0wYCo+AVHpu%-CzMlF=x%EPwxXXJU^gV@%OR@3=b5je(4PYEMQ1BSe!JGO z7bG8Q(FA7MjQ{gR8BXPVv}2Nw!U{a*C64qknjy8~69y*fsTJ)0S}xIk$h0_W@Yy*B ztezPCUE^_oFm0+XyLMtyyI4OA&$}oSl0q>^GvVGAudxM?jTN3r$>P)dr{U8fav+0& znhtIT4PK3q33>Akx;s-z(+tWVvCk&La~&eDU)JNeat%F^!}8a~>vG3u52M=FkDoUr zzvxYFTQl{Ypgp6*&7^n^A(Zj7X8)%urKFXE4+W>>;3Ek}>;if`bmS#Ejrx%P09pka z!IMWJ=SRV5R1obXxr{?1;fqSbNIX4Jx%f)g{*QdH;y#v1Xt2jvIl?l&usU`@aGoOm z3ce;8&@vi`h7-*=g)S@iQ-&=!8u(oujFRvc`aXvU43K22$~{M|+U#DL@jlD?w@tY| zJH=f$?eY9?e)n*HGkThkS&SQY^P$!XaY&}|Gdeq4547aGi!IJ579I}aqW6B|0Ie;M z1W52vYx<84(;b!egwOf96P3`F1dbB(eApeL9yMUX~nggnrB(PK?MU zmhk%#PN+zG*_&+&PvXVZHmMU;%Xq*s*KoLCg9cCsow0NrL9OpKOT z7_fUv6rC{o0~?E!87z}ZL#D<~CuG;h!PB5ak=LZ$;O#%a2#2YPp0u5pAKKxxlomP7 z3b7lm579s4Am)^}nO54!_V#N4QIIyF^2%c0Bkk|!sW5TZVGjtqX6rdN;nVXfEYmKz>Iy(Q0 zMfnLN)ljXB)8A?N&{G!OKf~avwUNO0JnYyB$+o-!)fCmvjP7i?@T>kz@0K5q<`z7F zRItzXK>SqaJ>LbcO3&F*tlB4^c+GD}*^MRGx}dyt6r+zly}s((caa22Tvk@C9{i{> zI`vu+6z*t!Uvq3M7KV)m^7W3n`hiYOB&M|6wJ+*CFd^_Spw_p@MMUIxlrQAgr{j%1 zVX$KxS&@&VCPFQlUI!LcKdRrLd7*40c_2JyqrYM|5{JUWKW{GsjEzCvyOmW$Y$m0* zI0&s;%{z$8!S3D;`(16FUdVv@CuV%T1Vq6pB((ughVetKC&Ti*to=W6EWkMA^FE-B-@HwcS_qyS#vAqA2nPKC_6% zqnV=*m)SoFX8ETrE!|&m3r}i4K>&m`l=IGmp9URsr;!pd@pT{|h>8W6t#o1zyN#_8 zB7^}H=0g?2gr)MqVCPMV(dADC&Gc3NWAqNx$ar2FI_G`Io!F5MGNF!}qnK>?G~iOh z1ZORw)>mv^)8@#b?H^g=i!&|{`-gLG2a~O$dYyBA{#l%KSo_1p{KDa=kV88bVE7*8wXRI9u)1}`j?89Ys`98KP+}pVin<^SD_09a z6$7lg($TZ6Gm+SX%4LyMllgAYbmcjXRWvCn5&&f) zEfh8S=7(c$bMM)M^?7ykNo2%Zo#t)CK){Iy>GTG1o|-qFjYRt2Vb3qZ^9a4H8ChJ6 zqx{`@Rr3}2>R35IZRHU#&-%h1{Os9|aU>uB8UaGg@ZEeSA-Z39a=(V&S89`q7Yqdq z8rEGLoi|jxrgItHb_(fhQL`joy^c5Lml0aNSYiAPZ|_4;%9hq{RxxUNP#EF#8nDlj zD>BC%VP@@Lh^Q_ReBXQV(tqWHP2QOA9>qP64wW3<=KdXzUj+g-SA{vyx#fj|`J z^WovBTWtz^!BEGJuBD#y*`I@r@wOEdyJEhiot6HZ@orl)ksG%~_~!3(#J@iotAhhn z=LGH4yfQP(;qt@b?f(`fSp^JJn<|WY zY#%JfW_{ggSISd>-q%N)9q&Q0XAc`~maxZcR?KsWW|;cIv7!p3f3ig+2eU*4Y3Yzd z=Yt46X(xoX^Cw)|I-8d?tg#fBR!Jv}0SC>L>Wl!&ZGg9QEFv#DdcN$`P z(`qO&Uq1+vNTn6?{(^7dLN_Vd?eM@4v|IMf8HJ7~jTa(=d=Pz7LL{0MkxgWPW1FmU zr$0{hVozD!6(1|y@UCL=hC6>^`gP~!Bg?$Q)#%}qmEeR+vL#jQ(;~&k-V4#+rTc3< zu;gW=G-MfoyEJ@$kBJCR2j#MXo6r&9E4NIz?06ERehNJV5S()sYlA22#r=F|5a?>W z^$L;kmW-D3&7p6!&2o@|=<>+xyc<%NGvIj?S#t`NFHC5Ru63U#m)%D;i9QM)`c5h* zH%6~IZmx;;Z%X$pxx6)VDDX2oi2xo;z92p(3?WDUaMXz*y0`1qWM6qRb-fgqIpP^7 zBd!T;Vt>4|!H&{7Yg;--xF4U^aVi z!+onq$E-&`H-*EBjQlW{nu-9)I0JLKX(TycsTVoh$U3Xr9@QGA1wAW>Ld@UBE(WQ) zdd{m&hQ0*RQpHizQ8Jz>eJtMy4+^8C4jq6Mo?>taG66DNsGd_0o~QhQ36)82uKGqb zYTzkHwASnL7ny*qP_~aeiIM>4=BN%x6E3s^cH9_{@E^3GbfBG}Mak|A-a9?(cI;L`8%4 z?S_1Rn=?S$+iP4kdJ9v0JxsB0T00e0N(LvCgsNk8A@8IYa&prTsVdqGmfjJ!EZRAe zayfpOYs2ZZxU3}CVa}qIP`hT9zBb_|8B|K|ZuA4!2Axw4ia+lOy#-e|o7^5ooR>b9 zZtY$oWg2WBzkpL7?IqO?X>@0ra)2(h?hLFh)#o64q1o4EgYkD9HHk@851P&{K3Pq6 za0l@ZT%m4Vw}e|nreZvI_NBh&$rB=JJQW?w4{*V{65}hL{hfWvLM*2w%jHq~ z^?uzu#mYY!SU}NhgMOlV3u&avOEN=x5W?}T^dUNY6|A_9m>48W@Jj~$`#2O`r+BeB 
z3Ih$9I#!=2TIdqn{{i{YWTLF_+PHeA&K5b@TbFf+0XCJygcOcohdHi8)REcv7#+4$z`fHk(fpfMC7e)>aYa<1_A%UMspT@A-tEBfD zS+p2DZEBVcU>WEJx+v!=`GUhKEqKkAUD4I%#Ep4dGN<-p7E+OVVwHJ}Th;TRU)$R` z?pSP$1^#jf9Xg6AUqJ~3W}FPiNneHDHBKFAMF*&_z=m(!q>#0FWTJ~haZ{k6 zcv8dY1{f&Wps`#_#T+>a3d6zkPt&oZieiHDEN`{r?78Slg6j8hPAlnk@Diq&kaaZt zdu@1~ohw!v5pc@A?1ubPiM|(B>6X|<++nA58yG>Z*CJQ6+tp_6DV%`GLWY^l%kR<7 z_2obMFRBC-xtI1ruqq)Z0(nAW*<#X(rd8c}UK84y3E-w+G7cP?JCPD-nBM1vJL4U% zpOC}smXgHAUA`Ul%K6>p7SM-=Op59?k1nw7O3@MZNj20sN&VVrXMZs^gZK`vS)`n0 zNX=++vfAv#kSRzBiT;3}o2=&wN~<8r5$qWIqo-*h+y8twpa&cbzCyZzF zr4@U9a98`Ae^mEH++<>Cz(;1rfL>^?jPK2+8I^8U3jK*~Emp#2GQ(tQQIVlb={tuz$rak>$0sKzh-ed2hV=K#n%*x?SVe!-yei-xN0Suhvi1 z+p0RN^eE$fZr!5yxf&aVoizB5)0)$A!E?=aTOt_^Ww-rdNNszJsVe}TEJ8~2ho1y zO98@CS+EWfsW&31UMVG6e;PB5pptQV(F6NNF1jm2mZ+!Qpa`ce{WMqCk>yGYt$b|l zHWH{q3nwz8pjC~<{y+*2O9r>Yi z(PL$wv!1w87?^Vje9b++A>DwYMab{XMj; zeRA5GnIipZ79v37w9MzU9LfP4{Yi|pdC#5waqO;!`xT>gz7|nD%8y&U3c~OD{{!I8 z-R7D~wEqXVH*WuE4hR_oY$LwV3QN;!goLB@AxeW0(y%^|qXzT9TQveymK3Cmum+!B z?)rg_`hA9~g!giKqUe0bW*;i}MAF_p-hkc}(=+cfoP`zSs9$7~WJcIh3`kz7qa&gAPE+}y$S7p|q6sq9XE|Tl{!+ttLg~u|NoRjB zK2;4I~w!bI6^ zgqvL~4~9KjO6TKhh!L~)6EZu8?N_3I`2KG=uSTXk!FHRiSxn?MY-z5&Q+k2sI8v)+ zTiH+yhs+iF^P)01Ygs7zig}J8nB^ooLOG;{F}F=?-lCp-a+mfmu18=M z;m9Z=D)H2}%2GOjn}$*azPn1&UNeoL#(`7{j17ytGROM2n=EkkUz!dLUXwhh{`%L- zRWu)C(dK^(^=uDLuDJ`}3CvZa+i&&{!VJpoa`@$AGTgo84)^vS6BOqoI}W4wAct@W zqeOZs^RmtnSL*v!uG}{Mh?TE@7Ewb&iJN(yplG;iv>ki0cVj_+tI)0mz4fxs;t0=~ zSemr9T2zQF!c1d~-3BYv z{=C&xZDo~uV?;3xr91v;tyAma)ygiWHtxT-NNaNP;$qoqeXsOq-~+E9d+QX;>~TFZ zVlq{7sZjuqL_YRX8f71kb4=($J?-UavXy2pmP;^{O}ZsQ7!AjTqI|$u@=vDzA%Oa{ zjjm}z{{XxrVJ$=j|=FC7Fe&|&$24kVCN##~!Q zE7a7V&%(;n1i2aDTWl)5s`ME*^@9aXd^+6vFKHZWdIs#ZcSv0ZqM-i+-(tN=Y0r3S zXHdP?ExUdj|6d3t9y~!8;SOFlaMKb30vViggANx#Bq}1K-D-Gw3cYar5GwuVKabH5 z=OzaOXh%yf;3$jY-v!PPjo^>vk_jw!0u^l47p7H}=`YTu+fo`tIa~S`u0H6QF~H6;c+m2J@**dhHD3^bt5wOM z)kM0?r~u-eX;@iY6H|}O=N3#7er4wotN~!7Ok?`e`XF7DW1s`1-xQfdfV4Zc(_EaX zU!_NNQ9+nqI*RWxeyJ}jvzNOxffBTx1!LI@jk;=b(9BhwDq9H$A@dMV>LJ9#WWZWV zlM|%$$Gocc`c(3ITxT6Oi_oilu{mFsm+zf%v91Y)?_TF_tl>n7bA7}rC80y)^!AO> z!gmX45P=Ia07UA|c_l#A!R0IGc+n-d{H-T{)}!m3QSwFIZ}YKK*AK->*cCxDIMAeC z<}h9Rpo*}0y}ajKvR3f(iAR>}I$w-{ZELc-?RLq86C<{A%4CRK1=YgLx7wv5rs8Dc z9Wqpjg=RKCo`$atb{f2B(7|733xzbW?Q~A%h?WUIk&Z1TZD@Sie!xnj(a;et`a@eI-w0nb_*&o2p&NM_ytlOUX`K(P2jUw0Z>MiflQ;Q8`)<1?^xr zN=b_$OYrEe@r^zN4e(Hptb zafySaf2ikDy^rX*Cx^QqPBN;_qf>nO)(1pW}jz;hY z0z~8joDn!aK#CC+LpqsqiZBhZ>VfcFpcV3}!2xd!aYnCY;T2cu%Z3<36qBZ_{c#>z zA?$(FCi{*bGbS|zJ(>#6cg6RjBND=&$GZ+y`GA)8USuCtRibGEY>|Er9tl_%_sYM3 z8vh5-3rha-<+$C>I8U;QTg=__nq+2l?`s$ZmtC?O+Am{i9r>niS`V%1D3nRa@P4U* z0}oDIX$zSJ$Y0U;+>Un*>|R;^yBvn6D5yz`S)RG90Z8-i$)1r*%pKN|oA@QVn{D)D z*c&9p;yJrG7ztaK*>0U<(-YmI#`8@LCKma7(P=NyBD7PrnW{PI-k!JfImb7(^M&BI zRUX#is77y%{@y?M#`W z6fr($wa-8hC#Tw2 zP0?!DhCt*iRb!V)J;&MIJK!s^j)`{F2wm7t=aq}~sK311AB*#@va?MCKRLsU_n#l$ zum6&S=~ev)h=Z$({sXv6VXJ=rv>-5aWxVoTHfVSDs_8)JvDUbU%g5ixV>p8nx*^m> zJ7VhC$%qM)-PWXuMVml4ZbW0G5g^!kf{}Vq%`aseV}lK3i`AD+LZz4C58-37?sXIB z^Swb4L6h=fN~LgHS~&0H4MBtZ7+0p}sa|!E&GqiBapTygeKD(6>Rzcx{Il*0*U}E8 zzQ_q#ebjrH*lQFUf3K64VXp+=hM$I!Si+^*oh_biAhrktpP*x@TKT?tv(l)gU;my& z;v1LfHkT&YNNsV${o^tZ@d5L4>#dz&f-MCCdnqp&Cjv}OooR+nT-<+(3sKg8R;Du^ zf3L}Gphkg$OOMEWu0XvEG`6#_bWu3(er{5<4i?~J)mlC2RMBdxfFpWN88})mFpu~s z8rNit#Mz>rD{*yD>7?PsL5WAJY2(C%_k;{sN0_8RCLrW1FKrTmZjU5EC-eu~DyWv0 zV$3D8x%@!$0m~c0APfjkwuVYUwihOB$$Qvg2H}enXO6w?f3S z&61~noSXvSElwaIJy%2WyVHK8B-Z|&k3Hwxmy+i%!rnBF{{igTN4Ob|W8lvp9KZpe zrNh&gwL8&95yueVk0U@jz5s4Zl`S|k<<1Bl9B}Z3vF)U3vp~^P5ZQ)kQcyMDwUcVFh_JDGJpnB)$YeH3?4kx#Oa9zqB#L)( 
z&bbKI+8OIrn3jIgTvHhHdg7jOb^Xoz{rYCS^E|-_+?{3N;GZFgf@FQ^pKM_)g(qx$ zT}9ARlE*O48p&h;+CeuQE5fWQt`*hY$q+SHqy+$ZTJW}Yc*2C`9fj>$Lkn=Mzx~|u zTDO)0RR{FYckl)1jV1YIpT84-=}cd7j3Cbt_Qqw-#Z={TFQUNtl(ocdt(25)8u+bB z@!mPeNHgHwr_O5DJd(L0mjSD>p|81qrFqn*TxTgHvjb&U-9NVXI(o(%d(!6w)Q0T@ z@5IHas9X4DZS@TO?6gr^6k;764P1%ZGmkTqMQaz7Tr;2EC~J*Y3l^N3jqI(A^nO%; zo_R^UABcXDzt23J!w&c~@S@;4ggQjxMCs7;FvMlB{`lmdUuDOjb531S39x+=+bQvC z%d}tTMBBzXG;YhS)8*GzsdU~g7K`V)J|}G<{oF1+`jc{jL$BVC{T{c{D>UMxo_?Gv z1CGFYYzu`b{!;*`Lq)>B$^KZCr0JQ&n)6*=lHItytBRschR7LKlHg&7aZiy@iDAgl z$9nUMd6$Ui*IY zf4t0BH>FJFR#td#nZ9;0{JeV`9AomW+c9}Bc$B{r5$5u zl1ZAp{JR}Jmqmvap>o9>hR9>JKJSU)Avi9y!n5$GY02@+Ho7Pc3S5RWq{+EvZuiwO zRW0M-v~exEBmR#sry|#WLTD z$am6x=#;c^hnBh>Z{#idq64cZ!p08Q)uoRWN|Jy3i>PY%d}%X%!iigcRiP9F3y&De z+KCAK+)N3jD`k8g!}e~UPY>Cq$m{(qsJ|DY6Y-~tdMoeBebj556fypPo({Mk=zqUZZFo;@rGXY$j$=c^s1+L4 z7&wkQ@u7dp9L0)>==aeayf|+ z-}TpfB5@jI^29k)Ot1Jh+bcZ@wf_KEL0P67QE^=#UlnkQI4#P!^Wd^I%#$2edGQi= z9qSa-2bX5#=C7h9lUc@aY>bHIF6GI1!F2eZ zT?t~<<-!(WA-8PDUU%7?xcYN(axERP)m+6VwO}s(eq#z19e1D6Yg1FMIEmWJc+>dK zc<- znfda?U;_Q}3Z&<6bLow?LVbsXAeF{r+|f##vtEtGM4q0{e}M5QZW71&AAje(qFN&B z!Fp>NjZF<68XdKNPmL6SC~MYlR!0f#{v4*&BBfGY-1$#G;BkGEj(5~#t4?d03pP{~ zibP~EzKSZtod)Owr0iv-hhn;lvU^dAx~PnX#u>V0Kebhhjt|)xr_%F!Y5bQz*Rrmb zo0zYjSn|c^j9j!GvKfb!n$PS`*7n2i<2{CT$w~TiX_;wWkzu-Ap^d=d?zFSyZ}4pV z{@Cls)&eiX6_atSPts0+B&MD7{PdUq0O1l0j2EBc4;@%7w6ZhJi?KZq`FnMAKS~=e zTX$Xbtsz?8@kRHymw!UxQ+IpfESZ-58s#PnqIiE2e?P!Xci4dXZTvi1 zlCHawd4-~CL@k8G$<&;>Qo{$!Lyz_5e+&~vrfmKW_amnsyA4GD&|?11$t z-xNYk$4Nwm!%z;OWG_~K;j2UUvtR8N|H?i~BO~KAGhZ`fpB3AzhB0Vo?JQI0D5N%1 zXSmEk$t&WnK1Jjtuu631;^BTk4Elm-qm)LjEgevWE0Wd@i!zwa%BeP{KM!DYwCb}{ z#cCJxU)Z707xmYcQ3K#uRz?TE*A_2mECO2I9F;FFAoYqyBb{_?+QlPz8s~4e(qX1ivW4H|&f&{($i62?RD{Y3sL56oV5(%%nS2m{ z3TaDk{ZvJy(|8@xs6YxKy#XPFTmcC}1$l#YD&xO`frP;Y6nJ?0v$uxJTB+BvJU~$$ z{=M=LJ}(WkjYJZ@*6m2DBbN^ACp#9^!Qv(tzD=28qHdS@6P%55>Oqv>;zY7*`a>%F zwm5nIYKr9xzJ3wR?&HyWgzSIWeC;Xv=f6k(2e>S&+z+UXOY-NiYzU}=UBtY7c#0U$ zbH=>8HL%YhzC7#YzbiX{i-y92Jjd3b@#5 zaRy(fB$s~;z?0|Cv*9kbBw{P&v+zXn>8Ixi>xA|5gu-a^$_b-nwU=lc(~p?X8%AZB z%{6igYKA|8D0O>$9}`UL(fVXD9UqaF~lQ{P*kBmc5~<% zcjWO$rN`}KX( zz_OE$Yqd51uX|l`=iX|U_N|#-4c^*B6iSoes*zuyc3#~ohU=z?-p})!k&?Xr$BBBY z6Mk|lChpBdwsZ6BS2KliMq*~#ZLoD%E^o+b=Psme!gH96yi$`*PW~X;&(f(>BY@&< z(sbN#o$$pEgp0U0fk}Nys37diWt`KK+RBvu?e0%PiC95^7+q4qcKVe-l0KbV*51Sr zKT>UZJ0|FzGS@Oh;tPFQ+?k?o@uAE2wfVhahs#g!W;Z{K2zlraWe2;fvHMqJDeoA3 z2~~>Wj~&%5t(8rquJ$5HeiHA@Ij-C#6iCqpFCAsf^jDL&aUAs?w;U*?d^eOYpY*S| z)~4qKxMpa0H@p^%@IsLGnEO_IqUQ}2{QmVV}M9}^!*YTrUZq3=DrMl(C z<=O2wa>xCWi54MAh+zxTV4zS>A`#_zJ0o$O0yi!=&zn7uvgZ$KX-0r)QRPh8uYOcZ zgd{$h!NC=ff?^N$4yOgh{Jbr0evI0Rqqbw5G5N`yPfRe1Jb}{cR~YOd`A~3%at-fW zC2P6aw)Bd1pbe+9dP-qDktRK(Q+YTT{Ao(1eOLvo8Kx6vFoms1!R4uC4sBWUowEo2 z2SCU87@rZ0^$jWY2UD~D5`Cb#J{tUX8(<=`X3{i@&KD#i?vPUfZtg@|q5%IX(={L9 zQQK8OFr)XP0Xh945qh$h^LR01=u#W~5S3rFVd*yI?YN;KjTKL&^Y$h@-#BFz#d)!S zW5|j~cN*FTIS%e7$@|%;L@eFH;X5Ui;xrKV*pc!LHtj3S*ntomiRTLBl`THqFz@(Z zTeO~BB4qu1H<3v?8xhrJMW0w=F2>gnK&hX|@>sx@n)>+q0qJ~sAi3dQ5{Yycryys= z8(bRC#4oK~w~PN&g2oHz{1wiW;ES>pqzQxxfOj~?dk47n3eFoX%l(c4cLBV|g|fop zVYW*W=HRCiUdX3@bV`~=>{y0@2ZPGYJ`I+T)LRg~6A=kQXlNN+s)5L|6`RY@2BjbV ze!GrnwLGeA_FB$bqrsb490oSu&_x=H1@@s!}&~Wl_O!VC}7&ir{Uo z`BB&L{g3B}2>GIy&cH5K$Q-G&%agflOG)~Ki+M#GZ7omve7QR-gg)VSt(_%2JrH?m zzC4beICL2q1A-h1xYhZF6acgTva}KazX^0Nn~H}5d0+L2a!9cOY~!SxwDae~8Js?@ z-EPe*rBxO7Bs9_UPA41ldv;y@641VPNK+h}k}IhyTFBCPXiOmT1fG)j*y2Hhr=KayMnIoGcn$<2|xA^)x=U3=J zTr`@0=E0>}Bal%43%Us|wKE3&x{WO4o-F?sZ zA9+&gm0{e3Sk3YTPtfE}@1LAA4{~o?V+oRp6sfD@c!ewej6RPnk^SLE51~b`Hl2a< zlh%Sa!u<48HCdo;657rs^PRuf~1qHM?MEPnn{rT?Cd0`u(de*3iY&coFn;I 
z$t+YlKtsf)F(|O`Z^ZTa?7UOn=<<*IZ!_oRt%oh-aa9t4FZND;mG9n2K;8+PI7DOD zt2+wV9YtNUa2|m5GZtV*0E*)$D^wyzcQI+$!k7=8JMQ3n*nNfTpv%u&wilWeY+pTY z?~V1^iqu-XLsW~qyC<>xBH5!zj=|im^?wf8vzuqhrO;~abVli?hBc zlW@sAPJXGkiDd4C5C3>s=P>(|``!?kIdB&?WwcCX+S;PKt6q9e{=8pP=IpX@RV%Qh zPU_(E@xq8ZNEnwfbJyuQQ8<;%F%OC}9n^Ga;i)UR3+(dBHr{WJd6nz+brE?;cog4i z_Xa22kWgF4-M1}o+W4`%vDz)DzPBSS^ixDz{V{#D?fthC^O!G7d^e}oVafp?4_zGK z%jwnHcqDb@skGK7S&Z^M5$=FNVumwfm0_Pe&AyY4l}D4kl$YM8AV|m6%RZO$EHO94 zBVIJtS4>$bu>`W!@*anWK8R6H$!?wZ5>GR?yCqk@JNW9omp0u-c9P7e^2BCKcv66@ zjDbF!f~>>@AITE@Tz-de5G;z{2xHa}KvhUV+b08(IPbxT)fu&N{}wQQpDX>P^#1@4 zLGZqilvY~@Bn6dH34K9MjMb=iWiVdvlVv70-wA7ZVc8DT-9crMpvQiDhqjoH@a5k{ z>cgVtzZ!GA*w2>w&)Ke~;0@NHMwjmjG~H1A+*%i>AE#fOoO0@?#AlCYHXJ@JO5w+u z45rcYg1XPAz$5lrqp`{KhyMU; zmNNQm_^nQ>_-OTh!Rr=G{B;WS?wdq@(wkc5ytmNEUVd+Y!X*GJ@K897R!%_d27^U?Hu;e^r{ShE*@4`mOhPMRfL33Tw}J zIZDn-roQ9JbID><2=A$rw-BG2=gIchZlC_9KjqN%UlIEv{)zygUTC8zhaAu&^+3MJ zric(JK#@Z&Vj z%A(**eyAXKCz1jYQFy`&DD+TC!ybIm4X}bHQCiN>*Y0$>#27CDgU^Vr{Wd+>$mh#- zi-u>seOKR)nz-z?jCa2!bE>EwCnzYoeAEp)Gbhz^&a7h}6M!7oCYiF&Ks>n#S%tEG1Jv?=f0wC={HvmbzbxxrLO~uW1PoD z$}v?j;f7<0jyQlw1p4_aMnyWsqFQ*F^N&>xqgLjefjk791GMjgi?$WbXF0qsXsb=o zI9<5}9(>UpZGC2@g{V`vsvg}s^6}&I`?s&*IdIDQ_RiGx4MErLrC*MQr>QbDC_d=F@Qzxc90P(i*d$KOQ&7n(L2Gm2CS1+U~^No9Y&q zhf=z?V7YY@PNSaS#*j@0Q~l2fk8QJ0VdR!+Tw>R59*^boPOC4I-w$-ME zh-ft3Okmcm`NP4$Y(QSmrF|d!Y^Hf@EUfqV{wMzc*FJuJ{{RuP>_=(8_IFXa(6w_; zzMWm*ij+@%O^&G1yiA7-IC3kO);^nhoM$&2{rLP}8;jHO_FmiSx-D+UNVB~*IveQF zttP$Al?SlYnvHBbx}GM22jvSNN#xJB{ae3@wCeER+UJI$b7gYwIb~5C52x=(@il&j z^yYK;e7h{I{T+Wz)hz1RYINz`(5g6VHlqWmyc~Fe*?jI^iqA3I4tI(BSoT+8*{V&} zbeiG1RnP+uEW49v}T7K3!uK*+mT) zlokW?R7_;5REL+%R1j2C1Cpq&qX6sVvtqGm0P3EuCo2*vR>Ss8ckdUtd|a4|9A=;- zU;?y@2QG@hd(ZUbsIcr62*L=;C`4IKol2DjI&Ckg>@|%NPml{aWhC8i3^LB^8M?9d zcZ~P#>NnMnA8@(Od@sy)y&p8baLfJ>40M)|M^(Sn$MYQT4s-pt{s?f1>D6jj#m3Ks zun`JD^h7|tt@n&VY!tYvyM~KhRmW!2X?dSU%Cj4NLftkrJ2~1ri0*XTf;zpt1OEUy zYVD6r$$l8TxX+e#9>DgVv=>sNKpp$aqDR4j%dF^mFTFYc0C9^pm$PkM!Yg0f(+7SQ zz32Y`wRE3PzcriG&%+^}%velS)gS80O>f=Bbe~Sg=Cl3P{4yEr$4GDYx7SsCfnz?+ zD^uzCe=OdoC7=GD_I10my zh9VVE(%2y65db=|?o0_c~$$MjB zG6?ctIp|h+o#JfI=21~~XO;?InZ^PAQ@h|ww+kVS*kM|WXyTDpD5R`K7bJ0rSjwzX zUtoGHWYSa*?J1w)A9c?;`)j8U-#^Qt^j{JCK!^{~0>UUmK@cA_P!Q1~1Ofp7Pm%;s zGz6eOqJSS{0x9f>?4Rz(MTq>`o@#tg28ynj#f26%g2j7L1Qn5W)xd zMItA41nI^-5J|`eU?6S)?ug^NReUPl3w9sb(BGkX-{iixL&d4xeqKJKJ>4`o!uPz% zhhhrW(M3=`D>YU7tu8#46^7znE<3o&NWO)~5a@Y@(q**L^63Qg zTiPHhkYKFsT}QM2$v?FHp`m3wGyx8-?u`oEwdE`Aj?QS7mKs&HohFXv-GP*cKo^Ub zKP!eiXI9efDzvr>LEnV#xy<=XsrPl5L0JLl? 
zTQcsQQ2IsmK@jIocw5ti?3>c*%l1FC*S4;8mE^Ye%<#NB1z_>Q!P^ZIGG{=Xj9alb&rti)9tQHUj8Q4Zdp}?vE$LfapvI1KW$@` zjAv=ui$U(|QoC;^T{0j40QS>|eAkCQi!b)w{{WXs$v#>3y|?YYlSEs#^HQ_6U@d2H zRNPW%@(z**#d$e6GRNLO@Ydz|Y5xGUXkONR#Y^^Axp9}&xO3Z4#Pv0Q{FgjzR#m-y zLdNEm<+popT8o3|T~Q`l;BZ>@O4>aWOZLBSTHd2|ixc?Enns2Gr(xZ;rNtfMr5BFx zyeH}3PCV7 zg`qI29AyIHf}0i9sLmKW^jWo7v>-Q#?g1+iPR;HXlt~|x?qy6p6<{UB#7qUDVAyq9 zM3@L3dzE03GF3u%MH(<5135%5Pl79R**bKct3IVb?mBdwXgqA!oM+&0zIW8J)*Af2 z=k)&o7k57Y0PT-VsixCwLup-Hx4UR*{j2$qInPD#->;c|WMfv-%i0+0%H$p0;WLop zGTvhgHnpkvR{bEnaT}(;#3iq{pzIC6C8Mg;%FmW=82%#?r)InPYiU~Cy`u~kqr7x; zibNRnm)2W( zfB4>$lX{cK;TvAh>GlphIyE}3YmW^|l^I>cGy}yI0N!DHxW2E+kH$Cu01q!O^#1?` zUanELqpUjF!G_89^nmd>lz?MkNzEraV0tg3#r;KRyl4I&HDs|{o9ty2@kRyJJ+lM|G{kB)m-C8`l2Rv3maes#l7fd10x zj&c70du(l%ynVu*(R(R>Y-tq<6z?dXE+7tlQ@S0MkxBGk!dm9IaxEbJl}$14Z7r{D zYFga0j*U=S>HDdk6Dg{)>E*iSg?*vji+hQwXH))C=aC<{@8S#d-A_$;vi|_;`W#%d zDg3Bwsj`%uJ94->DWOfTbF0Jyh7T^PN%APD;A=w7_NByRsq35Xxn56 zjP8MQ&%s3p>Vj0|1f4q+1u#%msNJ^C&aHF12ZVnS!+xuLSa)S38Rekq33sWXXmH)MzdZoEIIE(c0+Mbj>oxv#fQO(bOKn{mv!an=6&e zWscKL?T@r>%hID#-(5G3d1-a6f7&#M3Ffuq<}&tv#(QSB)Tz>=ui70p>F@=^Wz-yf ze=o&yyfXc*%>Mu{N@M9(q#V}=(|C~KkU?B7<9;ib?H-v_|4%`uvBY0i0%WfG> zS0Q(IL7~+9_flv)5bTI{WS+CK(A8>MjYiP8AB$s{_cD=ec%*Fe<>Z2v-#PUzskzLO zY183rd@hX396i#C^UmGsKY;D4YO%x$w42@%W@8e%@N#zL6FfVydDgeu9YVgXQoE{} zw3=bGiN^@((R;Axym@YVW?|u%WqWOO&N^lY_nFKuvBS@gB;0b#8yVEZuSATdBp$0l zhLeJUI-YQ1K1dJ;en=o|QzW#I3vy(E_gr%3#$8p>wa#hb=62?@`uP4$RiM0n@FU2r zPF#N`kaY@GCC;P|@o>k_8B*ni{2o|`PSe^+qI-vv?^1iHx_w#U`L%iT;Nz!tPy05N zQX5}Y?s*4X+fuGPziUr~{{Y+<3(`-Fm-R2kw3n{g~V7D-n;frc@IJPtBw2G>+wHR3T2eX@8Bz%fkHCJ7c z?CtEDmP})h{v{vTyZ-qr@efWm!TK*Qe@~a~ zuGsZA<~jTR%XWiL1=XzGR+D$FZ~I32kUT@=yqWZznEY+u9&Bq?%8YR$ayM@=@LqRS zt=bU`D!UK~$H5Vgav_c};u}K7N}{8dU>80Jaq;YU@$vipz)$%^AhFI!lH2p{P)~|h^hoL%^TZ@kF-JDfwx&xBKoD(y=!__7PYJRn(AfMK!!FCrdN-jtH%!$+sD*( z_~9pQm%9G|>7{RZCI;V|h4H-gE7dZU?%Z9cU!6Ygjq_=;rCKM7mme1iaLD406Yx_! zJEocDQ(Tun(@KU1InyzaZE)qOXjKEwt_~#V!US6N4!=}T{ z@&5p(p1r;0vyCg7PoY);EO;z;3$i3hZ>EZlpr&m8>AxPzQHM-&b$W-}!90cy4K zPkSl=%FS!o2Lq%BG=qcJkVwv$@37OKS_L6ar{#S!-IyXL!@-_Pp9s{_NF%wqsAQKG zR1K=rCuw+)2HUMnto2j2WjxKG+kIA$nog%tq|b%Ura+T8CGFeEdw71Q&&TU4{{Wva z2S3s}ZeP0zzxX3{sqwdpi?|FPA{gz{ivf&&t*$mfz-)shZ8VIp)2k5 z{Z2f%O!4(!pXz!X_;a{r?lG==VQ^U%_N;G5RB>_n02g;3(rT+}I+feMZ*=}2{uM9T zv*W{)^HI5kiQ8JxXqK=8w-jgQwGE2x4F@olV_9AGA_JZCTs2#OXg3h%+cR!n#CB`}(ReE`8v+Am_VV zc`v1C(}w=ir>@OkPwBi~Zcisum&22+>2Y69yo##e1)8cJ{4REgvJ1zSQYtmy4kVq1 z6v~l`&|dI5oUTf~vEdLlTfUKsTvs-dXh<%m!b;qXvY5yVq=~Te0Tqi@rzML-0Z=(W zr39L&l?vV>)3RfcVcgQ<8BDR+D+E-17cN_|ro!Iol?3eN)@K>gZhM|!E_r9Rq-I#! z*K>eiGI}W@TJ$c4uvnNb@Nn@y3w(8cXsYat^q#moDw6^WU>i4f@ z%^GbqOL(05c*X|D&0VaHbX1ZBDVkp z?I^3`=-yf9g3l#+FM*&++5TSr$AiG zOIRvBq`2|6tK&bGJ}W&JU6(FaS*7LUk~A7!z18-mZD&dKoa)aF6x(0BbF-GU^&P+* zM$^rGJoD|w*BLhM==(OeuW3@fEpELkj;@}eM)S1bYGqlL=%OMr>y-MyXA1>?sPZZf$dhFFDg{k==XL^ z6);PS;)|;|LusBE3~&X|3h?p$dmCi^&Y5(sL~EMJ%WoW8xCNs>C|_!F=lOZRPanc| zy7s>NicM|Q@i+p&-REdBwU0h>`%A}<;ppvqolT|UA08p+=)T52o(t_ZF{fa(EP)&P zs2dzwWwcjCNUoj!Pbk48MW z^=XVHFk&A(0JPcKo? 
- }, - { - "from": "gpt", - "value": "The man is sitting on top of piled objects or belongings loaded into the back of a pickup truck." - }, - { - "from": "human", - "value": "Is the man holding anything in his hands?" - }, - { - "from": "gpt", - "value": "Yes, the man is holding a beer in his hand while sitting on top of the objects in the back of the pickup truck." - }, - { - "from": "human", - "value": "What color is the pickup truck?" - }, - { - "from": "gpt", - "value": "The pickup truck is white." - }, - { - "from": "human", - "value": "Is the man sitting or standing?" - }, - { - "from": "gpt", - "value": "The man is sitting on top of the piled objects in the back of the pickup truck." - }, - { - "from": "human", - "value": "What could be the possible reasons for the man sitting on top of the possessions in the back of the pickup truck?" - }, - { - "from": "gpt", - "value": "There could be several reasons for the man sitting on top of his possessions in the back of the pickup truck:\n\n1. Moving: The man might be moving to a new location and needed to transport his items in a pickup truck, utilizing available space efficiently. By sitting on top of the belongings, he could be helping to stabilize and secure the items during the move, preventing them from falling or shifting during transportation.\n\n2. Lack of seating: If the cab of the pickup truck is already at full capacity or there isn't enough space for him to sit inside, the man may have chosen to sit on his possessions as an alternative seating arrangement.\n\n3. Road trip or outing: The man might be on a road trip or a casual outing with friends or family, where he is using the back of the pickup truck as an open-air seating area. By sitting on top of the loaded items, he may be enjoying the journey while savoring his beer.\n\n4. Keeping an eye on belongings: The man could be safeguarding his possessions by staying close to them, ensuring that no items are lost, stolen or damaged during the journey.\n\nRegardless of the specific reason, the image shows a person making the most of their situation, adding a touch of lightheartedness or adventure to an otherwise mundane scene."
- } - ] - }, - { - "id": "000000083996", - "image": "000000083996.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many people are seen surfing in the image?" - }, - { - "from": "gpt", - "value": "The image shows several people or a group of people surfing the waves in the ocean." - }, - { - "from": "human", - "value": "What is the weather like in the image?" - }, - { - "from": "gpt", - "value": "The weather in the image is sunny." - }, - { - "from": "human", - "value": "What are the people doing in the water?" - }, - { - "from": "gpt", - "value": "The people in the water are surfing the waves on surfboards." - }, - { - "from": "human", - "value": "Can you explain the basics of surfing and the necessary equipment for beginners?" - }, - { - "from": "gpt", - "value": "Surfing is a popular water sport that involves riding waves on a surfboard while standing or lying in various positions. The main goal is to balance on the surfboard while being propelled forward by the wave's momentum. The sport originated from Polynesian culture and has evolved into a global phenomenon, attracting participants of all skill levels and backgrounds.\n\nFor beginners, there are a few essentials to get started with surfing:\n\n1. Surfboard: A beginner's surfboard should be wide, thick, and long to provide better stability and buoyancy. This makes it easier for beginners to learn balance and catch waves. Soft-top boards or foam boards are recommended for beginners, as they are more forgiving and less likely to cause injury during a wipeout.\n\n2. Wetsuit: A wetsuit is essential for most surfers, depending on the water temperature. Wetsuits are made of neoprene, providing insulation and flexibility to allow surfers to stay warm and move comfortably in the water. Thickness and type of wetsuit will vary based on the water temperature and local climate.\n\n3. Leash: A leash is a cord that attaches the surfboard to the surfer's ankle, preventing the board from drifting away after a wipeout. This is an essential safety tool and ensures that the surfboard stays within reach at all times.\n\n4. Surf Wax or Traction Pad: Surf wax or a traction pad is applied to the surfboard deck to provide grip and prevent surfers from slipping off their boards. Wax is applied in a circular pattern, while traction pads are adhesive pads placed on the deck.\n\n5. Sun Protection: Surfers should wear sunscreen, sunglasses, and, in some cases, a hat or rash guard to protect their skin from the sun's harmful rays.\n\nBefore hitting the waves, beginners should take a few lessons with an experienced instructor to learn essential techniques, safety tips, and surfing etiquette. With the right equipment and proper training, anyone can experience the thrill of catching and riding waves in the ocean." - } - ] - }, - { - "id": "000000091544", - "image": "000000091544.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many people are in the image?" - }, - { - "from": "gpt", - "value": "There are two people in the image." - }, - { - "from": "human", - "value": "What type of clothing are the people wearing in the image?" - }, - { - "from": "gpt", - "value": "The people in the image are wearing jackets or coats, possibly leather or black in color." - }, - { - "from": "human", - "value": "What activity are the two people engaged in?" - }, - { - "from": "gpt", - "value": "The two people are walking next to each other on a street, likely during the day." 
- }, - { - "from": "human", - "value": "Is there a traffic light visible in the image?" - }, - { - "from": "gpt", - "value": "Yes, there is a yellow traffic light visible in the image." - }, - { - "from": "human", - "value": "Can you tell if the two people are walking close together or maintaining some distance?" - }, - { - "from": "gpt", - "value": "The image shows the two people walking close together, as seen in the close-up view of them." - } - ] - }, - { - "id": "000000148540", - "image": "000000148540.jpg", - "conversations": [ - { - "from": "human", - "value": "\nAre people getting off a train or a bus in the image?" - }, - { - "from": "gpt", - "value": "People are getting off a train in the image." - }, - { - "from": "human", - "value": "Can you describe the colors of the train?" - }, - { - "from": "gpt", - "value": "The train is blue and pink in color." - }, - { - "from": "human", - "value": "What is happening at the platform when the train stops?" - }, - { - "from": "gpt", - "value": "When the train stops at the platform, the door opens, and people start to exit the train. Among them, there is a couple carrying their luggage." - }, - { - "from": "human", - "value": "Is there any luggage seen on the platform?" - }, - { - "from": "gpt", - "value": "Yes, there is a suitcase on the ground near a pole on the platform." - }, - { - "from": "human", - "value": "How many suitcases does the couple carry?" - }, - { - "from": "gpt", - "value": "Unfortunately, the image does not explicitly show the exact number of suitcases the couple is carrying." - } - ] - }, - { - "id": "000000257752", - "image": "000000257752.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow is the teddy bear positioned in the image?" - }, - { - "from": "gpt", - "value": "The teddy bear is hanging by a string or thread in the image." - }, - { - "from": "human", - "value": "What is happening to the teddy bear in terms of the surrounding elements?" - }, - { - "from": "gpt", - "value": "Water droplets are falling on the teddy bear as it's hanging by the string." - }, - { - "from": "human", - "value": "What is the background like in the image?" - }, - { - "from": "gpt", - "value": "The background of the image features a dirt wall, which could indicate an outdoor or cave-like setting." - }, - { - "from": "human", - "value": "Could you describe a possible scenario or story behind this image?" - }, - { - "from": "gpt", - "value": "The image of the teddy bear hanging by a string and having water droplets fall on it in front of a dirt wall creates an intriguing and somewhat mysterious scene. One possible interpretation of this image could be an art installation or a creative photography concept, focusing on the juxtaposition of a soft, innocent teddy bear with the harsh, natural elements of water and earth.\n\nAnother possible scenario could be that the teddy bear represents an abandoned or lost toy, left to the elements and the passage of time. The hanging position of the teddy bear and the falling water droplets might symbolize feelings of loneliness, vulnerability, or helplessness. \n\nIn either case, the image's composition and subject evoke a sense of curiosity and may lead the viewer to wonder about the intentions, emotions, or stories behind the scene. The image thus becomes a visual metaphor or prompt for creating and exploring narratives within the context of the photograph." 
- } - ] - }, - { - "id": "000000426016", - "image": "000000426016.jpg", - "conversations": [ - { - "from": "human", - "value": "How many orange parking meters are there in the image?\n" - }, - { - "from": "gpt", - "value": "There are two orange parking meters in the image." - }, - { - "from": "human", - "value": "What is the color of the parking meters?" - }, - { - "from": "gpt", - "value": "The parking meters are orange." - }, - { - "from": "human", - "value": "Where are the parking meters in relation to trees and bushes?" - }, - { - "from": "gpt", - "value": "The parking meters are positioned next to a tree and near some bushes." - }, - { - "from": "human", - "value": "Is there any caution tape on the parking meter?" - }, - { - "from": "gpt", - "value": "Yes, there is caution tape wrapped around the parking meter, possibly indicating that it is out of order or needs attention." - } - ] - }, - { - "id": "000000242544", - "image": "000000242544.jpg", - "conversations": [ - { - "from": "human", - "value": "What color is the horse in the image?\n" - }, - { - "from": "gpt", - "value": "The horse in the image is brown." - }, - { - "from": "human", - "value": "Is the horse stationary or in motion?" - }, - { - "from": "gpt", - "value": "The horse is in motion, walking through the field." - }, - { - "from": "human", - "value": "What is the woman doing in the image?" - }, - { - "from": "gpt", - "value": "The woman is standing in front of the brown horse and looking at it." - }, - { - "from": "human", - "value": "What is the setting of the image?" - }, - { - "from": "gpt", - "value": "The setting of the image is a field, where the brown horse is walking, and the woman is standing in front of it observing the horse." - } - ] - }, - { - "id": "000000238511", - "image": "000000238511.jpg", - "conversations": [ - { - "from": "human", - "value": "How many giraffes are in the image?\n" - }, - { - "from": "gpt", - "value": "There are four giraffes in the image." - }, - { - "from": "human", - "value": "What is the terrain like in the image?" - }, - { - "from": "gpt", - "value": "The terrain in the image is grassy, either resembling a grassy field or a grassy hill." - }, - { - "from": "human", - "value": "What is the relationship among the giraffes in the image?" - }, - { - "from": "gpt", - "value": "The giraffes are standing near each other, forming a group in the grassy area." - }, - { - "from": "human", - "value": "What is the significance or importance of giraffes living in groups?" - }, - { - "from": "gpt", - "value": "Giraffes living in groups, also known as tower or a journey, is an essential aspect of their social structure and behavior. Being social animals, their group dynamics offer them several advantages – both in terms of survival and maintaining their well-being. Some key benefits of giraffes living in groups include:\n\n1. Protection: Staying in groups helps giraffes protect themselves and their offspring from predators, such as lions and hyenas. By maintaining a collective vigilance, they can quickly identify and respond to potential threats, alerting other members of the group and helping to protect each other.\n\n2. Improved foraging: Living in groups enables giraffes to locate and access food resources more efficiently, as they can share information about the location and quality of food sources. This can be particularly beneficial during periods of food scarcity, which characterizes their arid and semi-arid habitats.\n\n3. 
Social interaction: For giraffes, taking part in social activities and forming bonds with other group members is essential for their emotional well-being. This includes activities such as grooming, playing, and engaging in mock fights, which help giraffes establish and maintain strong relationships, learn essential survival skills, and communicate with one another.\n\n4. Reproduction: Living in groups facilitates breeding activity among giraffes, as young, sexually mature individuals can easily find prospective mating partners. This not only ensures the continuity and propagation of their species but also contributes to maintaining genetic diversity within the population.\n\nIn summary, the group structure of giraffes is vital for their survival, well-being, and thriving in their natural habitats. By living in groups, they can better protect themselves, locate and share resources, engage in social interaction and bonding, and reproduce to sustain and strengthen their population." - } - ] - }, - { - "id": "000000284454", - "image": "000000284454.jpg", - "conversations": [ - { - "from": "human", - "value": "\nIs the street mostly empty or are there cars on it?" - }, - { - "from": "gpt", - "value": "The street is mostly empty, with only a few cars on it." - }, - { - "from": "human", - "value": "What type of traffic control device is visible in the image?" - }, - { - "from": "gpt", - "value": "An unlit stoplight is visible at the corner of the street in the image." - }, - { - "from": "human", - "value": "Are there people standing on the corners of the intersection?" - }, - { - "from": "gpt", - "value": "No, there are no people standing on the corners of the intersection in the image." - }, - { - "from": "human", - "value": "What is the condition of the vegetation along the street?" - }, - { - "from": "gpt", - "value": "The street is lined with lush green trees, indicating healthy and well-maintained vegetation." - }, - { - "from": "human", - "value": "Why might an unlit stoplight be present on an empty street?" - }, - { - "from": "gpt", - "value": "An unlit stoplight could be present on an empty street due to a few reasons. It may indicate that the traffic light is malfunctioning or temporarily turned off for repair or maintenance work. Alternatively, it could be a time of day when traffic in the area is minimal, and the local traffic management authority might have set the stoplight to remain unlit to save energy.\n\nIt is also possible that the photo was taken in an area where traffic is typically low, and the stoplight serves as a cautionary measure rather than active traffic control. The presence of few cars on the street suggests that it is not a heavily trafficked area, which might contribute to the decision of the local traffic authority to keep the stoplight unlit during less busy periods.\n\nNonetheless, the unlit stoplight contributes to the calm and uncrowded ambiance depicted in the image, as it hints at the lack of congestion and vehicle-related noise pollution commonly found in urban environments. The lush green trees lining the street further enhance the serene impression created by the mostly empty street and its unlit stoplight." 
- } - ] - }, - { - "id": "000000581674", - "image": "000000581674.jpg", - "conversations": [ - { - "from": "human", - "value": "How many kitchen workers are visible in the image?\n" - }, - { - "from": "gpt", - "value": "There are several kitchen workers visible in the image, which indicates that there is a team of chefs working together in the commercial kitchen." - }, - { - "from": "human", - "value": "What are the chefs doing in the kitchen?" - }, - { - "from": "gpt", - "value": "The chefs are busy preparing dishes and cooking in the commercial kitchen, gearing up for the upcoming dinner service." - }, - { - "from": "human", - "value": "What kind of kitchen is shown in the image: a household kitchen, a commercial kitchen, or a small kitchen?" - }, - { - "from": "gpt", - "value": "The image shows a commercial kitchen, where a team of chefs is working together to prepare dishes and cook for their patrons." - }, - { - "from": "human", - "value": "What are some common characteristics and layout of a commercial kitchen?" - }, - { - "from": "gpt", - "value": "Commercial kitchens are designed to handle high-volume food production and are typically found in restaurants, hotels, catering services, and other food service establishments. Common characteristics and layouts of commercial kitchens include:\n\n1. Space: A commercial kitchen is usually larger than a household kitchen to accommodate multiple chefs and staff working together, as well as the required equipment for large-scale food production.\n\n2. Zoning and Workflow: One of the crucial aspects of planning a commercial kitchen is to create zones and establish a proper workflow for efficient operation. These zones often include specific areas for food preparation, cooking, plating, cleaning, and storage. This organization allows the staff to work more efficiently and minimize cross-contamination risks.\n\n3. Equipment: Commercial kitchens are equipped with professional-grade appliances and tools, such as ranges, ovens, grills, fryers, refrigeration units, and stainless steel worktables. This heavy-duty equipment is designed for continuous, high-volume use and is built to withstand the rigors of the commercial kitchen environment.\n\n4. Ventilation: Proper ventilation is essential in a commercial kitchen, both for maintaining air quality and for removing excess heat, steam, and cooking odors. A well-designed ventilation system typically includes exhaust hoods, air supply fans, ducts, and air purification components.\n\n5. Safety and Hygiene: In a commercial kitchen, various local and national regulations govern food safety, sanitation, and hygiene practices. Some common features that contribute to a safe and sanitary kitchen include non-slip flooring, easily cleanable surfaces, designated handwashing stations, and properly stored chemicals and cleaning supplies. \n\nOverall, a commercial kitchen is a specialized working environment designed for high-volume food production and requires careful planning, sturdy equipment, efficient workflow management, and strict adherence to safety and hygiene standards." - } - ] - }, - { - "id": "000000549117", - "image": "000000549117.jpg", - "conversations": [ - { - "from": "human", - "value": "How many trays of doughnuts are visible in the image?\n" - }, - { - "from": "gpt", - "value": "There are four trays of doughnuts visible in the image." - }, - { - "from": "human", - "value": "What is the setting of this image?" 
- }, - { - "from": "gpt", - "value": "The setting of this image is a doughnut shop with a display case showcasing a large assortment of different types of doughnuts on trays." - }, - { - "from": "human", - "value": "Are the doughnuts on display all the same or are there different varieties?" - }, - { - "from": "gpt", - "value": "The doughnuts on display come in a variety of different types and flavors, showcasing a large assortment to choose from." - }, - { - "from": "human", - "value": "What can you say about the presentation of the doughnuts?" - }, - { - "from": "gpt", - "value": "The doughnuts are presented neatly on trays in a display case, making it visually appealing and easy for customers to view the variety of options available for purchase. The organized arrangement of the doughnuts also helps customers to pick and choose their desired treats based on their preferences for flavors and textures." - }, - { - "from": "human", - "value": "What factors might influence a customer's decision when choosing doughnuts from this display?" - }, - { - "from": "gpt", - "value": "When choosing doughnuts from this display, a customer's decision may be influenced by several factors:\n\n1. Visual appeal: The appearance of the doughnuts can play a significant role in attracting customers to try different flavors or designs. Customers might be drawn to doughnuts with unique toppings, vibrant colors, or interesting shapes.\n\n2. Variety: With a wide assortment of different doughnuts available, customers may be tempted to try new or unfamiliar flavors, or stick to their personal favorites.\n\n3. Personal preference: Customers often have personal tastes that dictate their choices—some may prefer doughnuts with fruit fillings or glazes, while others might enjoy cream-filled or chocolate-covered doughnuts.\n\n4. Dietary restrictions or considerations: Some customers may need to take into account factors such as allergies or dietary preferences, like avoiding gluten or opting for plant-based options.\n\n5. Sensitivity to taste or texture: The range of doughnuts available might cater to customers who prefer soft and fluffy doughnuts or those who like crispy and dense doughnuts.\n\nIn summary, when choosing doughnuts from this diverse display, customers may consider factors such as visual appeal, variety, personal preferences, dietary restrictions and considerations, as well as their sensitivity to taste or texture. The unique array of doughnuts showcased in the store allows customers to find options that cater to their individual tastes or even explore new flavors." - } - ] - }, - { - "id": "000000506832", - "image": "000000506832.jpg", - "conversations": [ - { - "from": "human", - "value": "What color are the boats in the image?\n" - }, - { - "from": "gpt", - "value": "The boats in the image are blue." - }, - { - "from": "human", - "value": "How are the boats positioned in the body of water?" - }, - { - "from": "gpt", - "value": "The boats are tied together, floating in a body of water, and they appear to be docked or stored in a storage type area." - }, - { - "from": "human", - "value": "What are the boats made of?" - }, - { - "from": "gpt", - "value": "The boats are made of wood." - }, - { - "from": "human", - "value": "Are the boats unique or identical in design?" - }, - { - "from": "gpt", - "value": "The boats are identical in design, suggesting that they serve a common purpose or belong to a specific organization." 
- }, - { - "from": "human", - "value": "What could be the reason for so many boats being tied together in this manner?" - }, - { - "from": "gpt", - "value": "There could be several reasons why so many boats are tied together in this manner:\n\n1. Storage: The boats may be stored together when not in use, creating a more organized and efficient storage system, as well as making it easier to perform maintenance and prepare the boats for use.\n\n2. Rental Services: If these boats are part of a rental service, tying them together allows for easier management and supervision of the rental inventory. It is also convenient for customers, as they can easily choose a boat from the available selection.\n\n3. Training or Group Activities: In the case of training sessions or group activities, having many identical boats tied together allows for easy access, distribution, and instruction of participants, ensuring a consistent experience for everyone involved.\n\n4. Protecting the Boats: Tying boats together can provide additional stability and protection against potential damage from water currents, winds, or collisions with other vessels.\n\nIn summary, there could be various reasons for so many blue boats being tied together, including storage, rental services, training/group activities, or protecting them from potential damage in the water." - } - ] - }, - { - "id": "000000440389", - "image": "000000440389.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the baseball player doing in the image?\n" - }, - { - "from": "gpt", - "value": "The baseball player in the image is swinging a baseball bat, attempting to hit a baseball pitch during a game." - }, - { - "from": "human", - "value": "What is the general setting of the image?" - }, - { - "from": "gpt", - "value": "The general setting of the image is a baseball game, where the baseball player is at bat and trying to hit the pitch thrown by the opposing team's pitcher." - }, - { - "from": "human", - "value": "What is the batter's position when swinging at the pitch?" - }, - { - "from": "gpt", - "value": "The batter is taking a low and close swing at the baseball pitch, indicating that they're trying to hit a low-lying ball during the game." - }, - { - "from": "human", - "value": "What specific skills are required for a batter to successfully hit a baseball pitch?" - }, - { - "from": "gpt", - "value": "Several specific skills are required for a batter to successfully hit a baseball pitch. Some of these skills include:\n\n1. Hand-eye coordination: Hitting a baseball requires exceptional hand-eye coordination to accurately judge the trajectory and speed of the incoming pitch, and adjust the swing accordingly.\n\n2. Timing: Precise timing is essential for a batter, as it determines when to initiate the swing to make contact with the ball at the ideal moment, resulting in a solid hit.\n\n3. Bat speed: Generating bat speed is crucial for hitting the ball with enough force to send it far into the field. A quicker swing can offer more power and distance on the hit.\n\n4. Body mechanics: Proper body mechanics ensure an efficient, fluid swing movement that uses the batter's entire body. This includes weight transfer, hip rotation, and upper body torque, which all contribute to generating power in the swing.\n\n5. Pitch recognition: A successful batter must be able to recognize different types of pitches (e.g., fastballs, curveballs, sliders, or changeups) and quickly adjust their swing approach accordingly.\n\n6. 
Plate discipline: Good plate discipline involves being selective about which pitches to swing at, refraining from swinging at bad pitches, and knowing when to take a pitch or when to attempt a hit.\n\n7. Mental toughness: Baseball is a game of failure, and it is essential for batters to maintain a strong, positive mindset, learning from their mistakes and staying focused on each individual at-bat.\n\nIn summary, a batter's success in hitting a baseball pitch relies on a combination of essential skills, like hand-eye coordination, timing, bat speed, proper body mechanics, pitch recognition, plate discipline, and mental toughness. These skills help ensure a solid and efficient swing, increasing the chances of making contact with the ball and ultimately optimizing their performance at the plate." - } - ] - }, - { - "id": "000000562138", - "image": "000000562138.jpg", - "conversations": [ - { - "from": "human", - "value": "What colors are on the bus in the image?\n" - }, - { - "from": "gpt", - "value": "The bus in the image has white and red colors." - }, - { - "from": "human", - "value": "How is the traffic situation on the street where the bus is driving?" - }, - { - "from": "gpt", - "value": "The traffic situation on the street has a group of cars, and the bus seems to be driving ahead of them. This suggests that there is at least some level of traffic, but it's not possible to determine the exact severity of the congestion from the image alone." - }, - { - "from": "human", - "value": "What type of road is the bus driving on?" - }, - { - "from": "gpt", - "value": "The bus is driving on a street. However, the specifics like the number of lanes or the type of street (residential or commercial) cannot be determined from the image alone." - }, - { - "from": "human", - "value": "Is there any notable location around the bus?" - }, - { - "from": "gpt", - "value": "Yes, the bus is traveling down a street that is located near a park." - }, - { - "from": "human", - "value": "What mode of transportation is the main focus of the image?" - }, - { - "from": "gpt", - "value": "The main focus of the image is a bus driving down a street in front of several cars." - } - ] - }, - { - "id": "000000523914", - "image": "000000523914.jpg", - "conversations": [ - { - "from": "human", - "value": "What are the main components of the building in the image?\n" - }, - { - "from": "gpt", - "value": "The main components of the building in the image are a large brick building with a tower and a clock on it. The clock is located on the tower, which is next to or part of the building." - }, - { - "from": "human", - "value": "What is the color of the sky in the image?" - }, - { - "from": "gpt", - "value": "The color of the sky in the image is a beautiful blue." - }, - { - "from": "human", - "value": "What material is the building made of?" - }, - { - "from": "gpt", - "value": "The building is made of brick." - }, - { - "from": "human", - "value": "Can you describe the design and architecture of the building with the tower clock?" - }, - { - "from": "gpt", - "value": "Based on the image, the building appears to be a large brick structure with a sizable tower that houses a clock. 
The presence of a tower clock indicates that the architecture of the building might be influenced by classical or historical styles, as tower clocks are often associated with older structures or landmarks.\n\nThe brick material used for the construction of the building suggests durability and a sense of timelessness, which further emphasizes the traditional and historical aesthetic. The design might also include other architectural features such as archways or decorative elements which are typical of classical building styles.\n\nIn addition to the clock, the tower itself is likely to be a prominent element of the building's architecture. Its distinct presence adds a sense of height and grandeur to the structure, making it both visually appealing and potentially a local point of interest. The tower and clock also serve a practical purpose, providing accurate timekeeping for the surrounding area and enhancing the building's overall functionality.\n\nOverall, the building with the tower clock in the image reflects an architectural style that embraces tradition, historical influences, and a sense of timelessness. The combination of brick material, the tower, and the clock make for an imposing and visually striking structure against the beautiful blue sky background." - } - ] - }, - { - "id": "000000050706", - "image": "000000050706.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the person doing with the blender on the counter?" - }, - { - "from": "gpt", - "value": "The person is using the blender on the tile counter, specifically pressing a button to operate it. It contains water inside, which is being blended." - }, - { - "from": "human", - "value": "What is the surface material of the counter where the blender is placed?" - }, - { - "from": "gpt", - "value": "The counter surface is made of tiles." - }, - { - "from": "human", - "value": "What is inside the blender?" - }, - { - "from": "gpt", - "value": "There is water inside the blender." - }, - { - "from": "human", - "value": "Is the blender in use, or is it just sitting on the counter?" - }, - { - "from": "gpt", - "value": "The blender is in use, as someone is pressing a button on it to blend the water inside." - } - ] - }, - { - "id": "000000132303", - "image": "000000132303.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many zebras are visible in the image?" - }, - { - "from": "gpt", - "value": "There are six zebras visible in the image." - }, - { - "from": "human", - "value": "What are the zebras doing in the image?" - }, - { - "from": "gpt", - "value": "The zebras are grazing on the grass at the side of a dirt road, enjoying their natural environment." - }, - { - "from": "human", - "value": "What type of road are the zebras located near?" - }, - { - "from": "gpt", - "value": "The zebras are located near a dirt road." - }, - { - "from": "human", - "value": "Are the zebras standing on top of the road or beside it?" - }, - { - "from": "gpt", - "value": "The zebras are standing beside the dirt road while grazing on the grass." - }, - { - "from": "human", - "value": "Can you describe a zebra's habitat and some characteristics of their natural environment?" - }, - { - "from": "gpt", - "value": "Zebras are native to Africa and inhabit a variety of ecosystems, including grasslands, savannas, woodlands, and even mountainous regions. Their natural environment consists of vast, open spaces with plenty of grass for grazing and an adequate water supply. 
Zebras are herbivorous animals, and their diet mainly consists of grass, but they can also consume leaves, shrubs, and other vegetation if necessary.\n\nOne of the key elements of a zebra's habitat is the availability of grass for grazing. Grasslands and savannas, which are among the most common habitats for zebras, offer an abundant supply of food. These ecosystems typically have a warm to hot climate, with alternating wet and dry seasons. During wet seasons, the grass grows rapidly, providing ample food for the zebras. The dry season brings a scarcity of food, prompting the zebras to move in search of better grazing opportunities.\n\nAnother essential aspect of their habitat is access to water sources. Zebras require a regular supply of water to maintain their health and well-being. In their natural environment, they can be found near rivers, lakes, or waterholes, which provide hydration and support their physiological needs.\n\nSocial structure is also an important aspect of a zebra's life. They live in groups called \"harems\" or \"herds,\" consisting of one stallion (male zebra) and a group of mares (female zebras) with their offspring. This social structure provides safety, as zebras rely on the protection and strength of the group to fend off potential predators, such as lions, hyenas, and cheetahs.\n\nIn summary, a zebra's natural environment includes open grasslands, savannas, or woodlands, providing ample food for grazing and nearby water sources for hydration. Their social structure includes living in harems or herds, which offer safety and protection against potential threats in their ecosystem." - } - ] - }, - { - "id": "000000246909", - "image": "000000246909.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat pieces of furniture can be seen in the bathroom?" - }, - { - "from": "gpt", - "value": "In the bathroom, you can see a vanity sink, a toilet, and a mirror." - }, - { - "from": "human", - "value": "What is the color of the bathroom furniture?" - }, - { - "from": "gpt", - "value": "The bathroom furniture is all white." - }, - { - "from": "human", - "value": "Is there a window in the bathroom?" - }, - { - "from": "gpt", - "value": "Yes, there is a window in the bathroom." - }, - { - "from": "human", - "value": "What type of flooring does the bathroom have?" - }, - { - "from": "gpt", - "value": "The bathroom has a tiled floor." - }, - { - "from": "human", - "value": "How would you describe the overall appearance of the bathroom?" - }, - { - "from": "gpt", - "value": "The overall appearance of the bathroom can be described as clean, plain, and well-organized, with all-white furniture and a tiled floor. The presence of the window provides some natural light and ventilation, contributing to the clean and fresh atmosphere. The bathroom contains essential items, such as a vanity sink, toilet, and mirror, making it functional and practical for everyday use." - } - ] - }, - { - "id": "000000163030", - "image": "000000163030.jpg", - "conversations": [ - { - "from": "human", - "value": "What type of animal is seen in the image?\n" - }, - { - "from": "gpt", - "value": "The image shows a cow." - }, - { - "from": "human", - "value": "What is the cow doing in the image?" - }, - { - "from": "gpt", - "value": "The cow is rummaging and eating garbage on the side of a road." - }, - { - "from": "human", - "value": "What kind of structure is near the cow?" - }, - { - "from": "gpt", - "value": "There is a brick building near the cow." 
- }, - { - "from": "human", - "value": "What other objects or transportation can be found in this image?" - }, - { - "from": "gpt", - "value": "In the image, there is a parked scooter under a bridge near the cow." - }, - { - "from": "human", - "value": "What could be the possible implications of the cow's actions in the image, and what might be done to address the issue?" - }, - { - "from": "gpt", - "value": "The cow rummaging and eating garbage on the side of the road suggests a lack of proper waste management in the area, resulting in garbage being dumped in public spaces. This not only poses health risks for animals like the cow as they consume potentially harmful materials, but it also has a negative impact on the environment and the aesthetics of the location.\n\nTo address this issue, it is crucial to implement proper waste management practices, such as providing community members with accessible means for disposing their garbage in a legal and environmentally friendly manner. This could include measures like installing designated trash bins at appropriate intervals, offering regular waste collection services, and promoting waste reduction and recycling initiatives.\n\nMoreover, public awareness campaigns should be carried out to educate the local population about the adverse effects of improper waste disposal on animal health, the environment, and the overall well-being of the community. Additionally, local authorities may need to enforce strict penalties for those caught dumping trash illegally, as a means to discourage such practices.\n\nIn terms of ensuring the welfare of animals like the cow, local animal protection groups or government bodies may need to step in to rescue and relocate them to more suitable environments, where they can acquire appropriate food and care. With cooperation from the community and local authorities, these efforts can lead to better health for animals, a cleaner environment, and an overall improved quality of life for residents." - } - ] - }, - { - "id": "000000048339", - "image": "000000048339.jpg", - "conversations": [ - { - "from": "human", - "value": "What activity are the people in the field participating in?\n" - }, - { - "from": "gpt", - "value": "The people in the field are participating in flying kites." - }, - { - "from": "human", - "value": "What is the condition of the sky in the image?" - }, - { - "from": "gpt", - "value": "The sky is blue and clear, which makes it a suitable day for kite flying." - }, - { - "from": "human", - "value": "Are the kites flying at a lower altitude, near trees, or at a higher altitude above the trees?" - }, - { - "from": "gpt", - "value": "The kites are flying at a higher altitude, above the trees." - }, - { - "from": "human", - "value": "How would you describe the location where the people are flying kites?" - }, - { - "from": "gpt", - "value": "The people are flying kites in a large open field or a park, which provides ample space for this outdoor activity." - }, - { - "from": "human", - "value": "Why is kite flying a popular outdoor activity, and what characteristics make an ideal day for it?" - }, - { - "from": "gpt", - "value": "Kite flying is a popular outdoor activity for several reasons:\n\n1. It is accessible to people of all ages and requires minimal equipment, which makes it an inclusive and affordable pastime. Participants can either buy a pre-made kite or make their own with simple materials.\n\n2. 
Flying a kite promotes physical activity, as participants usually have to keep moving to keep their kites aloft, thereby increasing their level of exercise.\n\n3. Kite flying can also be a social event, allowing people to bond with family and friends or interact with fellow kite flyers in the park or field. It encourages communication, cooperation, and friendly competition.\n\n4. This activity can also be calming and meditative for some people. The act of flying a kite requires focus, which can help individuals disconnect from daily stresses and immerse themselves in the moment.\n\nIdeal conditions for a kite flying day include:\n\n1. Adequate wind: Light to moderate wind is essential for successful kite flying. Too little wind makes it difficult for the kite to be lifted up and stay aloft, while too much wind can cause the kite to become uncontrollable or crash.\n\n2. Clear skies: A sunny, blue sky enhances the kite flying experience, as the colors and patterns of the kite can be better appreciated. Also, a clear sky usually indicates fair weather, which is more comfortable for participants.\n\n3. Open space: Ideal kite flying locations include large fields, parks, or beaches with ample space for the kites to take off and fly without the risk of getting tangled in trees, power lines, or other obstacles.\n\nIn summary, kite flying is a popular outdoor activity due to its accessibility, the physical and social benefits it offers, and its therapeutic nature. A day with clear, blue skies, light to moderate wind, and an open space like a park or field is ideal for this enjoyable pastime." - } - ] - }, - { - "id": "000000545053", - "image": "000000545053.jpg", - "conversations": [ - { - "from": "human", - "value": "Is there a boat in the image? If so, where is it?\n" - }, - { - "from": "gpt", - "value": "Yes, there is a large boat in the image, and it is parked next to a white pier or dock on the water." - }, - { - "from": "human", - "value": "What is the color of the pier in the image?" - }, - { - "from": "gpt", - "value": "The color of the pier in the image is white." - }, - { - "from": "human", - "value": "What is the weather like in the image, and how can you tell?" - }, - { - "from": "gpt", - "value": "The weather in the image appears to be cloudy, possibly indicating an oncoming storm. This can be observed by the clouds in the sky and the reflection of the cloudy sky in the waters beneath the white dock. There might also be a sunset taking place." - }, - { - "from": "human", - "value": "What elements can be seen in the sky and on the water?" - }, - { - "from": "gpt", - "value": "In the sky, there are clouds that create a dramatic scene, possibly indicating an oncoming storm or the presence of a sunset during that time. On the water, you can see boats, including a large boat sitting at the dock, and the reflection of the cloudy sky." - }, - { - "from": "human", - "value": "Are there any buildings or structures visible near the dock?" - }, - { - "from": "gpt", - "value": "Yes, there is a building near the dock, which can be seen alongside the boats and the water." - } - ] - }, - { - "id": "000000461543", - "image": "000000461543.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many skateboarders are in the image?" - }, - { - "from": "gpt", - "value": "There are three skateboarders in the image." - }, - { - "from": "human", - "value": "What are the other two skateboarders doing while one is performing a jump?" 
- }, - { - "from": "gpt", - "value": "The other two skateboarders are watching the skateboarder who is performing the jump, likely observing his trick and technique." - }, - { - "from": "human", - "value": "What is the skateboarder doing while being airborne?" - }, - { - "from": "gpt", - "value": "The airborne skateboarder is performing a trick, showcasing his skills and control of the skateboard as he jumps." - }, - { - "from": "human", - "value": "Where is this event taking place?" - }, - { - "from": "gpt", - "value": "The event is taking place at a skate park, which is a location specifically designed for skateboarding and other related activities." - } - ] - }, - { - "id": "000000553392", - "image": "000000553392.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many zebras are present in the image?" - }, - { - "from": "gpt", - "value": "There are three zebras present in the image." - }, - { - "from": "human", - "value": "What are the zebras doing in the image?" - }, - { - "from": "gpt", - "value": "In the image, a baby zebra is feeding from its mother while a third zebra is nearby. The baby zebra is also nuzzling the larger zebra, possibly as a form of interaction or bonding. The third zebra appears to be inspecting the other two as they stand together in the grassy area." - }, - { - "from": "human", - "value": "What is the setting of the image?" - }, - { - "from": "gpt", - "value": "The zebras are standing in a large, grassy field with tall grass surrounding them. This type of environment suggests a natural habitat where they can graze and interact with each other." - }, - { - "from": "human", - "value": "What color pattern do zebras have?" - }, - { - "from": "gpt", - "value": "Zebras have a distinctive black and white striped pattern. This pattern serves multiple purposes, such as camouflage, temperature regulation, and even deterring insects. The stripes can make it more challenging for predators like lions to single out individual zebras in a group when they are moving together." - }, - { - "from": "human", - "value": "Why is the baby zebra feeding from the mother?" - }, - { - "from": "gpt", - "value": "The baby zebra is feeding from its mother to obtain essential nutrients for growth and development. Much like other mammals, baby zebras rely on their mothers' milk during the initial stages of their life. This milk is rich in proteins, fats, and antibodies that help the young zebra build a strong immune system and gain strength for life in the wild.\n\nAs the baby zebra grows, it will gradually transition to an herbivorous diet, primarily consisting of grass. This dietary shift will take place over several months, during which the mother will continue to nurse her offspring while also teaching it essential skills needed for survival, such as finding food and water sources, as well as staying safe from predators. Feeding from the mother also provides an opportunity for bonding and nurturing between the mother and baby, fostering a strong attachment that ensures the young zebra's well-being and appropriate development." - } - ] - }, - { - "id": "000000512088", - "image": "000000512088.jpg", - "conversations": [ - { - "from": "human", - "value": "How is the lighting in the kitchen?\n" - }, - { - "from": "gpt", - "value": "The lighting in the kitchen is very dim, making it difficult to see all the details clearly." - }, - { - "from": "human", - "value": "What is the condition of the refrigerator in the image?" 
- }, - { - "from": "gpt", - "value": "The refrigerator in the image is knocked over, lying on its back. This indicates that there has been some sort of accident or damage in the kitchen." - }, - { - "from": "human", - "value": "What other appliances are visible in the kitchen?" - }, - { - "from": "gpt", - "value": "Apart from the refrigerator, there is an oven and a cooker present in the kitchen." - }, - { - "from": "human", - "value": "Considering the overturned refrigerator and other damage, what potential risks are there in such a situation?" - }, - { - "from": "gpt", - "value": "Having an overturned refrigerator and other damage in a kitchen poses several potential risks and hazards:\n\n1. Electrical hazards: With appliances like refrigerators, ovens, and cookers in the kitchen, they may still be connected to power, causing electrical hazards if the connections are compromised because of the damage. There's a possibility of electric shock and short-circuits, which can be dangerous for anyone present in the kitchen.\n\n2. Gas leaks: If the cooker is a gas-powered stove, the damage to the kitchen may cause gas leaks, putting people at risk of respiratory issues, fires, or even explosions if left unnoticed or unaddressed.\n\n3. Injuries: The overturned refrigerator and other damaged items can pose a risk of physical injuries for anyone trying to use the kitchen. The sharp edges and heavy objects could lead to cuts, bruises, or worse.\n\n4. Spoiled food: The refrigerator knocked over implies that the fridge has been out of commission for some time, causing the food stored to spoil. Consuming spoiled food can lead to food poisoning or other health issues.\n\n5. Pest infestation: The presence of spoiled food and possible messes created by the damage may attract pests, such as rodents and insects, which can pose hygiene issues and further aggravate the situation in the kitchen.\n\nTo address these risks, it's crucial to identify and fix the causes of damage, disconnect the power and gas supply to the affected appliances, and clean up any spills or debris. Care should be taken when handling the damaged appliances to avoid injuries. Finally, proper maintenance and a thorough inspection of the kitchen should be carried out to prevent any future accidents or damages." - } - ] - }, - { - "id": "000000206444", - "image": "000000206444.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the location of the boat in the image?\n" - }, - { - "from": "gpt", - "value": "The boat in the image is located on a river or canal, either docked at a pier or floating near the dock, depending on the specific scene being observed." - }, - { - "from": "human", - "value": "What type of environment is the boat surrounded by?" - }, - { - "from": "gpt", - "value": "The boat is surrounded by a natural environment with many trees alongside the river or canal." - }, - { - "from": "human", - "value": "Is the boat moving or stationary in the image?" - }, - { - "from": "gpt", - "value": "It is not possible to definitively determine from the image whether the boat is moving or stationary. However, we can see that the boat is either docked at a pier or floating near the dock, which implies that it is either stationary or moving at a slow pace." - }, - { - "from": "human", - "value": "Is there any specific symbol, like a cross, visible on the boat?" 
- }, - { - "from": "gpt", - "value": "Yes, in one of the described scenes, there is a cross on the boat, indicating that it might have a religious or cultural significance, or it could be the boat's unique design." - }, - { - "from": "human", - "value": "Can you tell me more about boats typically found in canals or rivers?" - }, - { - "from": "gpt", - "value": "Boats that are commonly found on canals or rivers vary in type, size, and purpose. Some of the most common types of boats typically seen in these waterways include:\n\n1. Recreational Boats: These boats are designed for leisure activities and can range from small rowboats, kayaks, and canoes to larger vessels like cabin cruisers and pontoon boats. Recreational boats allow people to enjoy activities such as fishing, swimming, and sightseeing along the waterways.\n\n2. Narrowboats: Narrowboats, also known as canal boats or barges, are unique to the narrow canals found in some regions such as the United Kingdom. They are typically long, slender boats designed to navigate the tight spaces of these waterways, and they commonly serve as both transportation and accommodation, functioning as floating homes.\n\n3. Houseboats: Houseboats are larger boats that have been designed to serve as a primary residence or a vacation home. They are typically equipped with living spaces, including bedrooms, kitchens, and bathrooms, and they often remain docked for extended periods, with the option to move when the owner desires.\n\n4. Commercial Boats: Some boats on rivers and canals serve commercial purposes, such as cargo transportation, passenger transportation, or tourism. Examples include ferries, water taxis, and tour boats.\n\n5. Historic or Themed Boats: In some cases, boats found on canals and rivers are replicas or restored versions of historic vessels, like steam-powered paddleboats or sailboats. These boats often serve as tourist attractions, offering a glimpse into the history of the region or the specific type of boat.\n\nIn conclusion, boats found on canals and rivers can vary significantly in terms of their design, purpose, and size. Ranging from small recreational boats to large commercial vessels or historic themed boats, they contribute to the diversity and vibrancy of the waterways they navigate." - } - ] - }, - { - "id": "000000240511", - "image": "000000240511.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat objects are present in front of the house in the image?" - }, - { - "from": "gpt", - "value": "In front of the house, there is a pile of logs, a cut tree, and a refrigerator with a painting on its side. There is also a fence and trees nearby." - }, - { - "from": "human", - "value": "What type of house is shown in the image, and where is it possibly located?" - }, - { - "from": "gpt", - "value": "The image shows a house that appears to be characteristic of a New Orleans-style architecture. The presence of a cut tree and refrigerator in front of the house may indicate that the location is in New Orleans or a similar area." - }, - { - "from": "human", - "value": "What is unique about the refrigerator?" - }, - { - "from": "gpt", - "value": "The refrigerator is unique because it has a painting on the side of it, making it a decorated outdoor refrigerator. This adds an artistic touch to the scene in front of the house." - }, - { - "from": "human", - "value": "Could you explain the possible significance of the refrigerator's decoration?" 
- }, - { - "from": "gpt", - "value": "The decorated refrigerator, along with its placement outdoors, might serve various purposes or make a statement. There could be multiple reasons for such decorations, and some possible explanations include:\n\n1. Artistic expression: The painting on the refrigerator's side may be a unique way for the homeowner or a local artist to express their creativity in an unconventional, attention-grabbing manner. It may also add to the overall aesthetic appeal of the house or the neighborhood.\n\n2. Celebrating the local culture: The decorated refrigerator might be a nod to the vibrant culture and artistic traditions of New Orleans or the local community. Such decorations might contribute to the city's unique character and charm.\n\n3. Statement piece: The outdoor refrigerator with its painting could be making a statement about a specific event, social issue, or even an environmental concern related to the local area. The presence of a cut tree suggests that there might have been a storm or some other event that prompted the display of the refrigerator alongside the debris.\n\n4. Repurposing: The homeowner might have chosen to repurpose the old refrigerator as an outdoor decoration, rather than discarding it or sending it to a landfill. It showcases creative reuse and may convey a message about repurposing items instead of contributing to waste.\n\nWithout further context or input from the people involved, it is difficult to pinpoint the exact motivation behind the refrigerator's decoration. However, it is evident that the painted refrigerator adds a unique element to the scene in front of the house and serves as a distinctive visual feature in the image." - } - ] - }, - { - "id": "000000469781", - "image": "000000469781.jpg", - "conversations": [ - { - "from": "human", - "value": "What type of truck is seen in the image?\n" - }, - { - "from": "gpt", - "value": "A work truck, likely belonging to a power company, is seen in the image." - }, - { - "from": "human", - "value": "What is the truck being used for?" - }, - { - "from": "gpt", - "value": "The truck is being used to lift a person into the air to work on power lines." - }, - { - "from": "human", - "value": "Is a ladder involved with the truck?" - }, - { - "from": "gpt", - "value": "Yes, there is a ladder on the truck that helps the person reach and work on the power lines." - }, - { - "from": "human", - "value": "What precautions are being taken around the work truck?" - }, - { - "from": "gpt", - "value": "Caution cones have been placed on the street around the work truck, likely to alert passersby and ensure the safety of both the worker and the public." - }, - { - "from": "human", - "value": "Can you describe the setting in which the work truck is parked?" - }, - { - "from": "gpt", - "value": "The work truck is parked on a city street next to two brick houses, indicating that the scene takes place within a residential neighborhood." - } - ] - }, - { - "id": "000000120399", - "image": "000000120399.jpg", - "conversations": [ - { - "from": "human", - "value": "What color are the furniture and walls in the room?\n" - }, - { - "from": "gpt", - "value": "The furniture and walls in the room are all white." - }, - { - "from": "human", - "value": "What type of room is it, and what furniture pieces are in the room?" - }, - { - "from": "gpt", - "value": "The room depicted in the image is a bedroom. The furniture pieces in the room include a bed, night table, lounge chair, ottoman, and a rug." 
- }, - { - "from": "human", - "value": "Does the bedroom have a minimalist design?" - }, - { - "from": "gpt", - "value": "Yes, the bedroom features a minimalist design with all-white color scheme and minimal furniture pieces." - }, - { - "from": "human", - "value": "What is the purpose of an ottoman in a bedroom setting?" - }, - { - "from": "gpt", - "value": "An ottoman in a bedroom setting serves various purposes. It can be used as a place to rest one's feet when seated on the lounge chair, providing extra comfort and relaxation. Ottomans may also be used as additional seating options for guests, or even as an impromptu side table to hold items such as books, a tray with a drink or snacks, or a decorative element like a throw blanket. The versatile nature of ottomans makes them a practical and stylish choice for bedroom design." - } - ] - }, - { - "id": "000000211989", - "image": "000000211989.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many men are in the image?" - }, - { - "from": "gpt", - "value": "There are two men in the image." - }, - { - "from": "human", - "value": "Where are the men standing?" - }, - { - "from": "gpt", - "value": "The men are standing near a boat dock or marina, close to the water." - }, - { - "from": "human", - "value": "Do the men seem to be posing for a picture?" - }, - { - "from": "gpt", - "value": "Yes, the men seem to be posing for a picture next to the boat dock." - }, - { - "from": "human", - "value": "What is the expression on the men's faces?" - }, - { - "from": "gpt", - "value": "The men are smiling, indicating that they are enjoying their time near the boat dock." - }, - { - "from": "human", - "value": "What are some activities people might do at a boat dock or marina?" - }, - { - "from": "gpt", - "value": "At a boat dock or marina, people can engage in various activities, including but not limited to:\n\n1. Boating: Taking boats, yachts, or other watercraft out for recreational or fishing trips.\n2. Fishing: Enjoying a relaxing day of fishing from the dock or while on a boat.\n3. Sightseeing: Admiring the surrounding scenery, boats, and marine life.\n4. Photography: Capturing pictures of the boats, water, nearby wildlife, or posing with friends and family, as shown in the image.\n5. Socializing: Meeting friends, participating in waterside events, or chatting with fellow boating enthusiasts.\n6. Water sports: Participating in sports like kayaking, paddleboarding, jet-skiing, or joining others for a day of sailing or wakeboarding. \n\nThese activities vary depending on the specific amenities and features of the dock or marina, as well as the interests or preferences of the visitors, but overall, they offer a range of fun, relaxing, and engaging experiences for people to enjoy by the water." - } - ] - }, - { - "id": "000000288397", - "image": "000000288397.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat type of bus is featured in the image?" - }, - { - "from": "gpt", - "value": "The image features a double decker bus, which has two levels for passengers to sit or stand." - }, - { - "from": "human", - "value": "What is the position of the double decker bus in the image?" - }, - { - "from": "gpt", - "value": "The double decker bus is parked or stopped at the side of a street, next to a building." - }, - { - "from": "human", - "value": "What are the surroundings of the bus in the image?" 
- }, - { - "from": "gpt", - "value": "The bus is surrounded by tall buildings, and one of them appears to be a multi-family housing building." - }, - { - "from": "human", - "value": "What colors are featured on the double decker bus?" - }, - { - "from": "gpt", - "value": "The double decker bus has a red and yellow color scheme." - }, - { - "from": "human", - "value": "What is the purpose of a double decker bus and what are the advantages of using one?" - }, - { - "from": "gpt", - "value": "The main purpose of a double decker bus is to transport a larger number of passengers while using the same amount of road space as a single-deck bus, making it an efficient public transportation vehicle. By having two levels for passengers to sit or stand, it can carry more people and reduce the number of vehicles needed to serve a given route, which can help alleviate traffic congestion, lower operational costs, and reduce emissions.\n\nSome advantages of using double decker buses include:\n1. Increased capacity: A double decker bus can accommodate significantly more passengers compared to a single-deck bus, which is particularly beneficial in cities with high public transportation demands.\n2. Enhanced and unobstructed views: The upper deck offers passengers a higher vantage point, providing better views of the surroundings and making the journey more enjoyable, especially in cities with interesting landscapes or architectures.\n3. Accessibility: Double decker buses often feature either low-floor designs or accessible ramps, making it easier for passengers with mobility impairments or those using wheelchairs or strollers to board and exit the bus.\n4. Efficient use of road space: By transporting more passengers within the same vehicle footprint, double decker buses help optimize road usage and overall transportation network efficiency.\n5. Reduced environmental impact: Fewer buses on the roads due to increased passenger capacity means lower overall emissions, decreased fuel consumption, and a smaller environmental impact.\n\nIn summary, a double decker bus serves as an efficient form of public transportation, offering the advantages of increased capacity, better views for passengers, accessibility, more optimal use of road space, and reduced environmental impact." - } - ] - }, - { - "id": "000000307803", - "image": "000000307803.jpg", - "conversations": [ - { - "from": "human", - "value": "Is there a baseball game happening in the image?\n" - }, - { - "from": "gpt", - "value": "Yes, there is a baseball game happening in the image, with a batter swinging a bat on a baseball field." - }, - { - "from": "human", - "value": "What position are the batter and the catcher in the image?" - }, - { - "from": "gpt", - "value": "The batter is on the batting mark, ready to hit the incoming ball, while the catcher is positioned behind the batter at the home plate." - }, - { - "from": "human", - "value": "Is there an umpire present in the image? If so, what is the umpire doing?" - }, - { - "from": "gpt", - "value": "Yes, an umpire is present in the image, and they are closely observing the game as the batter swings at the incoming ball. The umpire is holding their glove out, prepared to make any necessary calls for the game." - }, - { - "from": "human", - "value": "Can you explain the role of each person in the image during a baseball game?" - }, - { - "from": "gpt", - "value": "In the image, we see three key participants in a baseball game: the batter, the catcher, and the umpire. 
Each person plays a distinct role in the game.\n\n1. The Batter: The batter's primary goal is to hit the incoming ball thrown by the opposing team's pitcher. Batters want to make solid contact and hit the ball into the field to hopefully reach a base, drive in runs, or possibly hit a home run. They also pay attention to the pitcher's throws and aim to make the right decisions on whether to swing or not, based on balls and strikes.\n\n2. The Catcher: The catcher, positioned behind the home plate, is responsible for receiving the pitches thrown by their team's pitcher. Catchers play a crucial role in guiding the pitcher in terms of pitch selection, and they need to read the batters' tendencies accurately. Additionally, catchers block pitches to prevent wild pitches, passed balls, and additional advancements by base-runners. They also play a role in throwing out potential base stealers and are involved in various defensive plays.\n\n3. The Umpire: The umpire is the impartial arbitrator who enforces the rules of the baseball game. Positioned behind the catcher, the umpire observes the game closely and makes essential decisions, such as determining if a pitch is a ball or a strike, whether a batter's swing makes contact, and if a runner is safe or out. Umpires also oversee other aspects of the game, such as monitoring substitutions, inspecting the field's conditions, and enforcing any rule infractions while keeping the game fair and organized.\n\nOverall, each participant in the image has a specific role to play, which ultimately contributes to the successful unfolding of a baseball game." - } - ] - }, - { - "id": "000000004245", - "image": "000000004245.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the condition of the stop sign in the image?\n" - }, - { - "from": "gpt", - "value": "The stop sign in the image is faded and appears to be vandalized, with graffiti or splashed red substance visible on the bottom part of it." - }, - { - "from": "human", - "value": "Where is the stop sign located?" - }, - { - "from": "gpt", - "value": "The stop sign is located on the side of a street in a residential area, near an intersection." - }, - { - "from": "human", - "value": "Does the image show any other objects or infrastructure near the stop sign?" - }, - { - "from": "gpt", - "value": "The image does not provide enough information about other objects or infrastructure near the stop sign. It mainly focuses on the faded and vandalized stop sign in the residential area." - }, - { - "from": "human", - "value": "What are the consequences of having a faded and vandalized stop sign in the neighborhood?" - }, - { - "from": "gpt", - "value": "Having a faded and vandalized stop sign in the neighborhood can lead to a variety of consequences, some of which include:\n\n1. Safety concerns: Faded and vandalized stop signs can be challenging to see or read, particularly in low-light conditions or during inclement weather. This can result in drivers potentially not noticing the sign and failing to stop appropriately at the intersection. Such situations may lead to an increased risk of accidents or collisions involving pedestrians, cyclists, and other motorists.\n\n2. Aesthetics and property value: Faded and vandalized signs can contribute to making the neighborhood look unkempt or neglected. This can potentially lower property values, as it may create an impression of an uncared-for community or an area plagued with vandalism and crime. \n\n3. 
Negative perception of the community: The presence of graffiti on stop signs might be associated with gangs or antisocial behavior, which can create a negative image of the community. This may lead to increased anxiety or fear among residents, especially if they perceive the area as dangerous or crime-infested.\n\nTo remedy these issues, it is essential for local authorities or neighborhood associations to address the problem by either cleaning or replacing the vandalized stop signs promptly. This helps to ensure road safety and maintain a positive and clean environment, while possibly deterring further acts of vandalism." - } - ] - }, - { - "id": "000000396186", - "image": "000000396186.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat sport is being played in the image?" - }, - { - "from": "gpt", - "value": "The sport being played in the image is tennis." - }, - { - "from": "human", - "value": "What is the gender of the tennis player?" - }, - { - "from": "gpt", - "value": "The tennis player is female." - }, - { - "from": "human", - "value": "What is the tennis player wearing in the image?" - }, - { - "from": "gpt", - "value": "The female tennis player is wearing a green skirt and a white top." - }, - { - "from": "human", - "value": "What action is the tennis player performing in the image?" - }, - { - "from": "gpt", - "value": "The tennis player is swinging her racket at a ball, about to hit it during the game." - }, - { - "from": "human", - "value": "What are the essential skills needed to excel in tennis, based on this image?" - }, - { - "from": "gpt", - "value": "To excel in tennis, several essential skills are needed, and some of these can be inferred from the image of the female tennis player in action. These skills include:\n\n1. Proper technique: It is crucial to learn and execute the correct techniques for various shots like forehand, backhand, serve, and volley. The tennis player's swing in the image demonstrates the importance of technique in successfully hitting the ball and aiming it towards the desired direction on the court.\n\n2. Footwork: Good footwork is vital in tennis, as it enables the player to move quickly and efficiently around the court to reach the ball. The female player's position on the court in the image suggests that she has moved to meet the ball and prepare for a well-executed shot.\n\n3. Timing: Hitting the tennis ball at the optimal moment is crucial for accuracy, power, and control. The image captures the player just about to hit the ball, indicating the importance of timing when swinging the racket.\n\n4. Strength and endurance: Tennis requires physical strength to generate power in shots and stamina to maintain a high energy level throughout the match. The female player's swing and stance imply that she has built up the necessary strength and endurance to compete effectively in the sport.\n\n5. Strategy and decision-making: Tennis involves devising a smart game plan and making split-second decisions during play. The image does not show this directly, but it can be assumed that the player has a strategy in mind when executing her shots and reacting to her opponent's moves.\n\nIn summary, to excel in tennis, a player should focus on mastering proper techniques, improving footwork, refining timing, building strength and endurance, and developing strategic thinking and decision-making skills. These aspects can help a player perform at optimum levels and succeed in the competitive world of tennis." 
- } - ] - }, - { - "id": "000000401705", - "image": "000000401705.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the color of the cat in the image?" - }, - { - "from": "gpt", - "value": "The color of the cat in the image is orange, as it is an orange tabby cat." - }, - { - "from": "human", - "value": "What is the cat doing in the image?" - }, - { - "from": "gpt", - "value": "The cat is sleeping, curled up inside of a bowl in the image." - }, - { - "from": "human", - "value": "Where is the bowl with the cat located?" - }, - { - "from": "gpt", - "value": "The bowl with the cat is located on a table." - }, - { - "from": "human", - "value": "Is there any other object on the table besides the bowl with the cat?" - }, - { - "from": "gpt", - "value": "Yes, there is a yellow plate on the table as well, but the cat is lying in a separate bowl." - }, - { - "from": "human", - "value": "What can we learn about cats' behavior from this image?" - }, - { - "from": "gpt", - "value": "This image showcases a common behavior among cats, which is their tendency to curl up and sleep in small, enclosed spaces, like a bowl in this case. Cats are known to enjoy the sense of security and warmth that snug resting spots provide them. These spaces allow them to feel protected from potential threats and help them save energy by reducing heat loss during sleep. \n\nCats often sleep for an average of 12 to 16 hours a day, and they have a unique ability to adapt their sleeping habits according to their environment. For instance, in this image, the orange tabby cat has chosen a bowl on a table as its resting place, demonstrating its flexibility in finding suitable sleep locations. This behavior also illustrates cats' innate curiosity and creativity in seeking out unconventional yet cozy spots to take naps. \n\nMoreover, the image of a cat sleeping in a bowl on a table can evoke feelings of warmth, cuteness, and relaxation, which makes it a captivating and endearing sight for people who appreciate the behavior of cats." - } - ] - }, - { - "id": "000000038905", - "image": "000000038905.jpg", - "conversations": [ - { - "from": "human", - "value": "Is the bus in the photo parked or in motion?\n" - }, - { - "from": "gpt", - "value": "The bus in the photo is in motion, traveling down an asphalt road next to a sidewalk." - }, - { - "from": "human", - "value": "What type of neighborhood is the bus traveling through?" - }, - { - "from": "gpt", - "value": "The bus is traveling through a residential section of town." - }, - { - "from": "human", - "value": "Is the street busy or empty?" - }, - { - "from": "gpt", - "value": "The street is empty in the image, with the bus as the primary focus." - }, - { - "from": "human", - "value": "Does the photo show the bus leaving the residential area and crossing a viaduct?" - }, - { - "from": "gpt", - "value": "Yes, the photo shows the bus leaving the residential section of town and traveling across a viaduct." - }, - { - "from": "human", - "value": "Is the bus in the photo an old-fashioned or modern style?" - }, - { - "from": "gpt", - "value": "The bus in the photo is an old-fashioned style, which suggests the image might be an old photo." - } - ] - }, - { - "id": "000000070927", - "image": "000000070927.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many different types of animals are there in the image?" - }, - { - "from": "gpt", - "value": "There are two different types of animals in the image: giraffes and zebras." 
- }, - { - "from": "human", - "value": "Are the giraffes and zebras in the wild or in an enclosed area?" - }, - { - "from": "gpt", - "value": "The giraffes and zebras are in an enclosed area, which could be a pen, an outdoor enclosure, or a field with a fence." - }, - { - "from": "human", - "value": "What are the giraffes and zebras doing in the image?" - }, - { - "from": "gpt", - "value": "The giraffes and zebras are grazing in the field, likely feeding on grass or other vegetation." - }, - { - "from": "human", - "value": "Can you describe the surrounding environment where the giraffes and zebras are located?" - }, - { - "from": "gpt", - "value": "The surrounding environment is an enclosed field or pen, with trees, rocks, and possibly a fence or barrier. This setting provides a natural-like habitat for the animals, allowing them to feel more comfortable while still ensuring their safety and containment." - } - ] - }, - { - "id": "000000372726", - "image": "000000372726.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the color of the sheets on the bed?" - }, - { - "from": "gpt", - "value": "The color of the sheets on the bed is brown." - }, - { - "from": "human", - "value": "How many dressers or nightstands are in the bedroom?" - }, - { - "from": "gpt", - "value": "There are two small dressers or nightstands in the bedroom, one on either side of the bed." - }, - { - "from": "human", - "value": "What is the condition of the bedspread and pillows on the bed?" - }, - { - "from": "gpt", - "value": "The bedspread is turned down and two white pillows are exposed, contributing to the neat and tidy appearance of the bed." - }, - { - "from": "human", - "value": "What type of floor does the bedroom have?" - }, - { - "from": "gpt", - "value": "The bedroom has a tile floor." - }, - { - "from": "human", - "value": "Is there any unique decoration on the bed?" - }, - { - "from": "gpt", - "value": "Yes, there is an origami towel adorning the bed, adding a unique and artistic touch to the room." - } - ] - }, - { - "id": "000000066124", - "image": "000000066124.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhere are people flying the kites?" - }, - { - "from": "gpt", - "value": "People are flying the kites by the shore on the beach in an area that appears to be a field." - }, - { - "from": "human", - "value": "How many kites are visible in the image, and can you describe their appearance?" - }, - { - "from": "gpt", - "value": "There are a large number of kites visible in the image, which are colorful and flying everywhere in the sky." - }, - { - "from": "human", - "value": "How many people can be seen flying the kites?" - }, - { - "from": "gpt", - "value": "The image shows a couple of guys flying the kites, but there could be more people involved who are not visible in the photo." - }, - { - "from": "human", - "value": "Can you describe the benefits of flying kites as a recreational activity?" - }, - { - "from": "gpt", - "value": "Kite flying is a popular recreational activity with various physical, mental, and social benefits. Here are some of the advantages associated with flying kites:\n\n1. 
Physical Benefits:\n- Improves hand-eye coordination: Kite flying requires control and precision, which helps improve hand-eye coordination and fine motor skills.\n- Increases physical activity: As kite fliers run and move to maintain the kites in the air, flying kites can be a form of exercise, especially when doing it on a beach or open field where there's ample space to move around.\n- Boosts cardiovascular health: The physical activity involved in kite flying can lead to increased heart rate, which promotes better circulation and cardiovascular health.\n\n2. Mental Benefits:\n- Reduces stress: The process of controlling a kite in the sky can be a relaxing and mindful activity, helping to alleviate stress and anxiety by focusing on the present moment.\n- Enhances concentration: Kite flying requires continued focus and concentration to keep the kite aloft and stable in the wind. This practice can improve attention span and mental discipline.\n- Stimulates creativity: Kite flying lends itself to creativity, with opportunities to experiment with different types of kites, designs, and flying techniques for a unique experience.\n\n3. Social Benefits:\n- Promotes social interaction: Group kite flying, like the scene depicted in the image, can provide an opportunity for people to connect, interact, and develop friendships with others who share this interest.\n- Encourages outdoor play: Kite flying is an outdoor activity that encourages people of all ages to spend time outside, enjoy the fresh air, and connect with nature.\n- Caters to all ages: Kite flying is a versatile activity that can be enjoyed by people of all ages, making it a popular choice for family outings, community events, or friendly gatherings.\n\nIn summary, kite flying offers a range of physical, mental, and social benefits, making it a well-rounded recreational activity suitable for various people and contexts. By flying kites, individuals can improve their hand-eye coordination, concentration, cardiovascular health, creativity, social interactions, and overall well-being while enjoying the outdoors." - } - ] - }, - { - "id": "000000015404", - "image": "000000015404.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the main feature of the sky in the image?\n" - }, - { - "from": "gpt", - "value": "The main feature of the sky in the image is the presence of large, gray storm clouds, which make the sky appear dark and threatening." - }, - { - "from": "human", - "value": "What is the condition of the hillside in the image?" - }, - { - "from": "gpt", - "value": "The hillside in the image is lush and green, suggesting a healthy and vibrant landscape." - }, - { - "from": "human", - "value": "Is there a road visible in this image?" - }, - { - "from": "gpt", - "value": "Yes, there is a paved road visible in the image, which runs through the grassy hill." - }, - { - "from": "human", - "value": "What kind of weather is indicated by the presence of storm clouds in the image?" - }, - { - "from": "gpt", - "value": "The presence of storm clouds in the image indicates an incoming storm or a change in weather. These clouds bring rain, which can make the landscape greener and more vibrant, but can also lead to potential challenges like flooding or slippery roads. The overall atmosphere of the image suggests an impending shift in weather conditions, with the approaching storm clouds making the sky darker." 
- }, - { - "from": "human", - "value": "How can storm clouds impact the surrounding environment, and what do they imply about the climate in the area?" - }, - { - "from": "gpt", - "value": "Storm clouds can have both positive and negative impacts on the surrounding environment. On the positive side, they bring much-needed rainfall, which nourishes the soil, promotes plant growth, and replenishes water sources such as rivers, lakes, and groundwater reserves. In the image, the lush green hillside suggests that the area might benefit from regular rainfall, contributing to its healthy and vibrant landscape.\n\nOn the negative side, storm clouds can cause severe weather, which may lead to various challenges and disruptions. Heavy rainfall may result in flooding, making roads impassable, causing property damage, and posing risks to human life and wildlife. Storms can also bring strong winds, thunder, and lightning, which can cause power outages, damage to infrastructure or vegetation, and even start wildfires in some cases.\n\nThe presence of storm clouds in the area implies that the climate may be characterized by periods of rainfall or a mix of sunny and rainy days, which contribute to the region's lush landscape. However, the potential for severe weather suggests the need for caution, preparedness, and planning to address any weather-related challenges or emergencies that may arise." - } - ] - }, - { - "id": "000000335111", - "image": "000000335111.jpg", - "conversations": [ - { - "from": "human", - "value": "How many bikes are laying on the sidewalk in the image?\n" - }, - { - "from": "gpt", - "value": "In the image, there are 2 bikes laying on their sides on the sidewalk." - }, - { - "from": "human", - "value": "Is there someone standing near the row of bicycles?" - }, - { - "from": "gpt", - "value": "Yes, there is a woman standing in front of the row of bicycles. She is wearing a black coat with a hood and is standing near a bus stop." - }, - { - "from": "human", - "value": "Are the bicycles located near a particular landmark or facility?" - }, - { - "from": "gpt", - "value": "The bicycles are located near a bus stop, indicating a connection between different modes of transportation accessible in the area." - }, - { - "from": "human", - "value": "What possible reasons might there be for the bikes to be laying on their sides?" - }, - { - "from": "gpt", - "value": "There could be several possible reasons for the bikes laying on their sides:\n\n1. Accidental Knock-over: Someone may have accidentally knocked the bikes over, either due to carelessness or while trying to make space on a crowded sidewalk. In such cases, a passerby or the bike owner might have bumped into the bikes and caused them to fall.\n\n2. Vandalism or Prank: Some individuals may intentionally knock over bicycles as an act of mischief, vandalism or playing a prank. This disrespectful behavior can create inconveniences for the bike owners and others who need to use the sidewalk.\n\n3. Poor Maintenance or Bike Stand: Sometimes, bikes can fall over due to issues related to bike stands or maintenance. A broken or unstable bike stand may not provide adequate support, causing the bikes to topple over. Additionally, if the bikes are not well-maintained, parts such as the tire pressure, loose brakes, or an uneven center of gravity can lead to a bike falling over when parked.\n\n4. 
Weather Conditions: Strong winds, heavy rain, or other severe weather conditions can cause bikes to fall over, especially if they are not secured properly.\n\nThough it's challenging to pinpoint the exact reason without more information about the situation, these are some possibilities that could have caused the bikes to be laying on their sides in the image." - } - ] - }, - { - "id": "000000236740", - "image": "000000236740.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the color of the teddy bear in the image?\n" - }, - { - "from": "gpt", - "value": "The color of the teddy bear in the image is brown." - }, - { - "from": "human", - "value": "What is the teddy bear wearing?" - }, - { - "from": "gpt", - "value": "The teddy bear is wearing a red bow." - }, - { - "from": "human", - "value": "Where is the teddy bear positioned in the image?" - }, - { - "from": "gpt", - "value": "The teddy bear is sitting on a bed against the headboard." - }, - { - "from": "human", - "value": "Is the teddy bear described as cute or old?" - }, - { - "from": "gpt", - "value": "The teddy bear is described as both cute and old, which can indicate that it has been cherished and well-loved over the years." - }, - { - "from": "human", - "value": "What is another possible location where the teddy bear might be placed?" - }, - { - "from": "gpt", - "value": "Another possible location for the teddy bear could be sitting on a chair leaning against the side, or sitting up against a stair railing. These alternate locations show that the teddy bear can be positioned in various places within a home." - } - ] - }, - { - "id": "000000182020", - "image": "000000182020.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the cat resting on in the image?\n" - }, - { - "from": "gpt", - "value": "The cat is resting on top of a laptop and a sweater." - }, - { - "from": "human", - "value": "Where is the cat located?" - }, - { - "from": "gpt", - "value": "The cat is located on a desk." - }, - { - "from": "human", - "value": "What is the cat doing?" - }, - { - "from": "gpt", - "value": "The cat is laying down, snuggling up to the sweater." - }, - { - "from": "human", - "value": "Is there a folded blanket next to the cat?" - }, - { - "from": "gpt", - "value": "Yes, there is a folded blanket next to the cat, sitting on the desk." - }, - { - "from": "human", - "value": "Do cats typically enjoy laying on warm objects, and if so, why?" - }, - { - "from": "gpt", - "value": "Yes, cats typically enjoy laying on warm objects. There are several reasons for this behavior:\n\n1. Seeking warmth: Cats, like most animals, are drawn to warmth. Their body temperature is slightly higher than that of humans, so they might feel more comfortable when they are warmer. Laying on warm objects, such as a laptop or sunlit spot, allows them to conserve energy and stay cozy.\n\n2. Safety and comfort: Warm and soft surfaces often provide a sense of security and comfort for cats. Laying on warm objects like blankets, sweaters, or laptops can make cats feel more secure as these objects often retain some of the owner's scent, providing a familiar and reassuring environment for the cat.\n\n3. Heat regulation: Cats are better equipped to handle higher temperatures than cold, so they seek warmth to help regulate their body temperature. Laying on warm surfaces can help them stay at a comfortable temperature, especially in colder months or in air-conditioned environments.\n\n4. 
Attention: Cats may also lay on warm objects, such as laptops, to gain their owner's attention. If a cat notices that their owner spends a significant amount of time on the laptop, they may choose to lay on it to gain some attention or affection.\n\nOverall, cats love laying on warm objects for various reasons, including seeking warmth, feeling secure, and regulating their body temperature. It can also be a way for them to receive attention from their owners." - } - ] - }, - { - "id": "000000523007", - "image": "000000523007.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many tennis players are in the image?" - }, - { - "from": "gpt", - "value": "There are two tennis players in the image." - }, - { - "from": "human", - "value": "Are the tennis players shaking hands with their opponents or just standing on their side of the net?" - }, - { - "from": "gpt", - "value": "The tennis players are shaking hands with their opponents by extending their arms and reaching across the net." - }, - { - "from": "human", - "value": "What are the tennis players holding in their hands?" - }, - { - "from": "gpt", - "value": "The tennis players are holding racquets in their hands." - }, - { - "from": "human", - "value": "What does the handshaking gesture suggest about the tennis match?" - }, - { - "from": "gpt", - "value": "The handshaking gesture between the tennis players suggests that the match has just ended, and they are showing sportsmanship and respect towards their opponents. It is a traditional etiquette followed in tennis and many other sports as a symbol of fair competition and acknowledgement of the players' efforts. The handshake typically occurs after a match is concluded, and it helps reinforce the spirit of camaraderie and goodwill between competitors. It signifies the importance of maintaining a positive attitude, acknowledging each other's skills, and fostering a sense of mutual respect on the court." - } - ] - }, - { - "id": "000000434813", - "image": "000000434813.jpg", - "conversations": [ - { - "from": "human", - "value": "What is located next to the street sign in the image?\n" - }, - { - "from": "gpt", - "value": "In the image, the street sign is sitting next to a container labeled \"salt.\"" - }, - { - "from": "human", - "value": "Is there a bench near the street sign as well?" - }, - { - "from": "gpt", - "value": "Yes, there is a bench nearby the street sign, probably for people to sit down and rest." - }, - { - "from": "human", - "value": "What is the overall setting of the image? Does it include grassy areas or bushes?" - }, - { - "from": "gpt", - "value": "The image includes a road near a grassy area with a bush. This contributes to the overall setting, which seems to be a public space with streets, signs, and seating areas." - }, - { - "from": "human", - "value": "What is the purpose of the salt container next to the street sign?" - }, - { - "from": "gpt", - "value": "The purpose of the salt container next to the street sign is likely for maintenance and safety reasons. During winter months, when roads and sidewalks become icy due to freezing temperatures and snowfall, salt is commonly used to melt the ice and provide traction for vehicles and pedestrians. The salt container is placed nearby the street sign so it can be easily accessed by maintenance workers or even residents for their safety in icy conditions." - }, - { - "from": "human", - "value": "Is there a dog house visible in this image? If not, what could have been mistaken for it?" 
- }, - { - "from": "gpt", - "value": "There is no dog house visible in the image. It is possible that the container labeled \"salt\" was initially mistaken for a dog house due to its similar size or shape. However, the container's label confirms that it is meant to store salt and is not a dog house." - } - ] - }, - { - "id": "000000254948", - "image": "000000254948.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the man doing in the image?\n" - }, - { - "from": "gpt", - "value": "The man in the image is standing on a lush green field with a soccer ball, getting ready to kick it. He is running toward the soccer ball in a fenced-in enclosure, dressed in a black uniform or track suit." - }, - { - "from": "human", - "value": "Is the man a soccer player?" - }, - { - "from": "gpt", - "value": "Yes, the man is a soccer player, as evidenced by his actions and attire in the image." - }, - { - "from": "human", - "value": "What type of clothing is the man wearing?" - }, - { - "from": "gpt", - "value": "The man is either wearing a black uniform or a track suit, indicating that he is an athlete or soccer player involved in physical activity." - }, - { - "from": "human", - "value": "What is the setting of the image?" - }, - { - "from": "gpt", - "value": "The setting of the image is a lush green field within a fenced-in enclosure, which is likely a soccer field or a sports facility designed for soccer or similar outdoor activities." - }, - { - "from": "human", - "value": "How can soccer help with a person's physical and mental well-being?" - }, - { - "from": "gpt", - "value": "Soccer is a physically demanding sport that offers numerous benefits to an individual's physical and mental well-being.\n\n1. Physical Benefits:\n- Cardiovascular Health: Soccer involves constant running and aerobic exercises, which help improve heart health and overall cardiovascular fitness.\n- Muscular Strength and Endurance: Soccer players engage in various physical movements, such as running, jumping, and kicking, that help to strengthen muscles and increase their endurance.\n- Flexibility and Coordination: The sport requires agility, flexibility, and coordination as players move, change directions, and maintain control of the ball during matches.\n- Burns Calories and Fat: Soccer is an excellent way to burn calories and maintain a healthy body weight due to its high-intensity nature.\n\n2. Mental Benefits:\n- Stress Relief: Playing soccer offers a healthy outlet for releasing stress and tension, leading to improved mental well-being.\n- Social Interaction: Soccer is a team sport that requires communication, cooperation, and camaraderie, promoting social interaction and allowing players to form strong bonds with teammates.\n- Builds Confidence and Self-Esteem: Accomplishing goals, improving skills, and contributing to team victories helps boost a player's sense of self-worth and confidence in their abilities.\n- Teaches Discipline and Perseverance: As soccer players develop their skills and face various challenges on the field, they learn the importance of discipline, perseverance, and maintaining a positive attitude.\n\nIn summary, soccer is an excellent sport for promoting physical and mental well-being. It offers a wide range of physical benefits related to cardiovascular health, muscular strength, flexibility, and weight management. 
Furthermore, soccer helps to relieve stress, foster social interaction, and teach valuable life skills, such as discipline and perseverance, which contribute to overall psychological health and personal growth." - } - ] - }, - { - "id": "000000405361", - "image": "000000405361.jpg", - "conversations": [ - { - "from": "human", - "value": "Are the people in the image waiting to cross the street or just standing together?\n" - }, - { - "from": "gpt", - "value": "The people in the image are waiting to cross the street at a crosswalk, standing together on the sidewalk near the edge of a busy street." - }, - { - "from": "human", - "value": "Is there a traffic light present in the image, and if so, what is its current status?" - }, - { - "from": "gpt", - "value": "Yes, there is a traffic light present in the image, and the people are waiting for the light to change, which suggests that it is currently red for pedestrians." - }, - { - "from": "human", - "value": "What does the presence of umbrellas indicate about the weather in the image?" - }, - { - "from": "gpt", - "value": "The presence of umbrellas indicates that it might be raining or drizzling, as people are using them to keep dry while waiting to cross the street." - }, - { - "from": "human", - "value": "Are there any cars visible in the image?" - }, - { - "from": "gpt", - "value": "Yes, there are passing cars visible on the busy street next to the group of people waiting on the sidewalk." - } - ] - }, - { - "id": "000000480023", - "image": "000000480023.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the hand holding in the image?" - }, - { - "from": "gpt", - "value": "The hand in the image is holding a hot dog." - }, - { - "from": "human", - "value": "What toppings are on the hot dog?" - }, - { - "from": "gpt", - "value": "The hot dog is covered in mustard, ketchup, and onions." - }, - { - "from": "human", - "value": "Where is the person holding the hot dog?" - }, - { - "from": "gpt", - "value": "The person holding the hot dog is at a sports stadium or a ballpark." - }, - { - "from": "human", - "value": "What is the hot dog served in?" - }, - { - "from": "gpt", - "value": "The hot dog is served in a paper container." - }, - { - "from": "human", - "value": "Is the person showing off the hot dog or is it a close-up shot?" - }, - { - "from": "gpt", - "value": "The person is holding the hot dog close to the camera, showcasing it in a close-up shot." - } - ] - }, - { - "id": "000000015906", - "image": "000000015906.jpg", - "conversations": [ - { - "from": "human", - "value": "What color is the chair in the image?\n" - }, - { - "from": "gpt", - "value": "The chair in the image is black." - }, - { - "from": "human", - "value": "What is the chair positioned under in the image?" - }, - { - "from": "gpt", - "value": "The chair is positioned under a clock on the wall." - }, - { - "from": "human", - "value": "What type of chair is visible in the image?" - }, - { - "from": "gpt", - "value": "The visible chair is a leather chair, suggesting it's comfortable and likely placed in a cozy space." - }, - { - "from": "human", - "value": "Is there anything placed on the chair, such as a book or other items?" - }, - { - "from": "gpt", - "value": "Yes, there is a book placed on the armrest of the black leather chair. This could indicate that someone was recently sitting there reading, or it's a convenient spot for keeping their current read." 
- }, - { - "from": "human", - "value": "What does the presence of the leather chair and clock on the wall convey about the space or room?" - }, - { - "from": "gpt", - "value": "The presence of the black leather chair and the clock on the wall behind it conveys a sense of coziness, relaxation, and a thoughtful atmosphere. The chair, being comfortable and inviting, suggests that the space is designed for people to unwind, read or even engage in conversations. The clock, on the other hand, provides a functional element to the room, allowing inhabitants to keep track of time when engaged in activities or conversations.\n\nCombining these elements, it can be inferred that the space was created with a particular focus on providing an area where relaxation, quiet contemplation, or intellectual pursuits could take place. This environment could be particularly suitable for someone who values their leisure time, enjoys reading, or appreciates a quieter, more thoughtful setting away from the hustle and bustle of daily life." - } - ] - }, - { - "id": "000000009951", - "image": "000000009951.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the main subject of the image?\n" - }, - { - "from": "gpt", - "value": "The main subject of the image is a young man riding a skateboard." - }, - { - "from": "human", - "value": "Where is the skateboarder located in the image?" - }, - { - "from": "gpt", - "value": "The skateboarder is located on the street, riding on the sidewalk and passing by a storefront, with a tall building in the background." - }, - { - "from": "human", - "value": "What kind of clothing and footwear is the skateboarder wearing?" - }, - { - "from": "gpt", - "value": "The skateboarder is wearing jeans, rubber shoes, and skateboard gear." - }, - { - "from": "human", - "value": "Is the skateboarder performing any specific tricks, or is he simply traveling on the skateboard?" - }, - { - "from": "gpt", - "value": "The skateboarder appears to be traveling on the skateboard rather than performing any specific tricks, as the image shows him turning a corner at high speeds and skating down the street." - } - ] - }, - { - "id": "000000468428", - "image": "000000468428.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat object is the woman holding at the beach?" - }, - { - "from": "gpt", - "value": "The woman is holding a surfboard at the beach." - }, - { - "from": "human", - "value": "What is the color of the surfboard the woman is carrying?" - }, - { - "from": "gpt", - "value": "The surfboard the woman is carrying is green." - }, - { - "from": "human", - "value": "What is the woman's intended activity, as suggested by the image?" - }, - { - "from": "gpt", - "value": "As the woman is carrying a surfboard, her intended activity is most likely surfing, either preparing to go into the water or exiting after a surfing session." - }, - { - "from": "human", - "value": "What are some of the basic techniques for beginners who want to start surfing?" - }, - { - "from": "gpt", - "value": "For beginners wanting to start surfing, there are some basic techniques and steps to follow:\n\n1. Choosing the right equipment: Beginners should start with a soft-top surfboard that is larger and more stable, making it easier to balance on and catch waves. Wearing a leash connected to the surfboard is essential for safety, as it prevents the board from being separated from the surfer in the water.\n\n2. 
Learning to paddle: Lying prone on the surfboard, the surfer should practice paddling by alternately reaching forward with their arms and pulling through the water. This technique helps propel the surfer in the water and catch waves.\n\n3. Practicing the pop-up: To stand up on the surfboard, the surfer needs to quickly transition from lying down to standing in a controlled, fluid motion, known as the pop-up. The surfer should place their hands near their chest and push themselves up, bringing their feet beneath them to achieve a balanced stance on the board.\n\n4. Stance and balance: Once standing, the surfer should adopt a low, slightly crouched stance, bending their knees and keeping their center of gravity low. The surfer should also try to maintain a relaxed posture, with their weight positioned evenly and arms used for balance.\n\n5. Reading and catching waves: Learning how to read the ocean and catch waves is a crucial aspect of surfing. Beginners should practice in small, gentle waves and focus on understanding how to position themselves correctly to catch a wave. Gradual progression to larger and more challenging waves can be made as the surfer gains experience and confidence.\n\n6. Respecting surf etiquette: It is important for surfers to respect the rules and etiquette of the surf spot, such as giving right of way to other surfers that are already on a wave and not \"dropping in\" on someone else's ride. This helps to ensure a safe and enjoyable experience for all surfers in the water.\n\nTaking surfing lessons from experienced surfers or certified surf instructors can significantly improve the learning process, as well as enhance safety and enjoyment on the water. It is always advisable to practice with friends or in the presence of other surfers to ensure you have help if needed. Safety should always be a priority, including checking weather and water conditions and knowing how to handle rip currents and other ocean hazards." - } - ] - }, - { - "id": "000000296675", - "image": "000000296675.jpg", - "conversations": [ - { - "from": "human", - "value": "What is happening in this baseball game?\n" - }, - { - "from": "gpt", - "value": "In the image, a batter is getting ready to hit the ball during a professional baseball game. The catcher and umpire are both waiting behind the batter at home base, poised for action, while spectators watch the game." - }, - { - "from": "human", - "value": "What are the roles of the umpire, batter, and catcher in this scene?" - }, - { - "from": "gpt", - "value": "In this scene, the umpire, batter, and catcher all play crucial roles in the baseball game. \n\nThe umpire is responsible for making decisions on various aspects of the game, such as calling balls, strikes, and outs. They ensure that the game is conducted according to the official rules and help maintain the integrity of the competition. The umpire is positioned behind the catcher and closely observes the trajectory of the pitched ball to determine whether it is within the strike zone.\n\nThe batter's primary objective is to hit the ball and achieve a base hit or an even better outcome, such as a home run. They must use their skill, timing, and strength to effectively connect with the incoming pitch, then attempt to advance as far around the bases as possible.\n\nThe catcher, positioned behind the batter and in front of the umpire, has several responsibilities. One of their main tasks is to catch the pitched ball thrown by the pitcher while also ensuring it remains within the strike zone. 
The catcher is responsible for signaling pitch types to the pitcher and defending the home plate against potential base-stealing attempts by the opposing team.\n\nIn the image, the umpire, batter, and catcher all contribute to the dynamic and exciting atmosphere of the baseball game. Each participant must be constantly aware of their unique role to contribute effectively to the game's progress." - }, - { - "from": "human", - "value": "What happens after the batter hits the ball?" - }, - { - "from": "gpt", - "value": "After the batter successfully hits the ball, they will drop the bat and attempt to run to the first base, and ultimately achieve as many bases as possible. The opposing team's fielders will try to catch the ball (resulting in an out if caught on the fly) or field the ball and throw it to the base they believe the batter or other baserunners will be going to in an attempt to get them out. If the batter makes it to first base without being tagged out or before the ball arrives at the base, they achieve a base hit. If the batter hits the ball far enough or with enough power, they may be able to achieve multiple bases or even hit a home run, allowing them to circle all the bases and score a run. Simultaneously, other baserunners on the field will also attempt to advance around the bases, increasing the team's scoring potential." - }, - { - "from": "human", - "value": "Are there any spectators watching the game in the image?" - }, - { - "from": "gpt", - "value": "Yes, there are people watching the baseball game in the image. The presence of spectators adds to the atmosphere, excitement, and pressure for the players as they compete in the match." - } - ] - }, - { - "id": "000000055202", - "image": "000000055202.jpg", - "conversations": [ - { - "from": "human", - "value": "What color is the tray in the image?\n" - }, - { - "from": "gpt", - "value": "The tray in the image is blue." - }, - { - "from": "human", - "value": "How many corn dogs are on the tray?" - }, - { - "from": "gpt", - "value": "There are three corn dogs on the tray." - }, - { - "from": "human", - "value": "What type of dipping sauce is available for the corn dogs?" - }, - { - "from": "gpt", - "value": "There are two cups of mustard available as dipping sauce for the corn dogs." - }, - { - "from": "human", - "value": "Are the corn dogs accompanied by any other food items on the tray?" - }, - { - "from": "gpt", - "value": "No, the image only shows three corn dogs on the blue tray along with the two cups of mustard. There are no other food items present." - }, - { - "from": "human", - "value": "Why are corn dogs considered a popular snack or fast food item?" - }, - { - "from": "gpt", - "value": "Corn dogs are a popular snack or fast food item for several reasons:\n\n1. Convenience: Corn dogs are easy to prepare and consume. They are often pre-cooked and can be quickly reheated in a microwave, oven, or deep fryer. Their stick makes them simple to grab, eat, and carry around, especially at events like fairs, carnivals, or sports games.\n\n2. Taste: Corn dogs consist of a sausage or hot dog coated in a thick layer of cornmeal batter and then fried until crispy. The combination of a juicy sausage and the sweet, crunchy cornmeal batter makes for a satisfying and flavorful snack.\n\n3. Affordability: Being a relatively inexpensive food choice, corn dogs are a popular choice for people looking for a budget-friendly meal option. 
The price of corn dogs usually remains low, which attracts consumers who are looking for something tasty, filling, and budget-conscious.\n\n4. Nostalgia: Corn dogs are often associated with childhood memories, making them an appealing choice for people seeking some comfort food or reminiscing about their youth when they enjoyed corn dogs at amusement parks or other fun events.\n\n5. Versatility: Corn dogs can be customized with various types of sausages and batters, as well as a wide array of dipping sauces, such as mustard or ketchup, spicy mayonnaise, or honey mustard. This allows people to enjoy corn dogs according to their taste preferences.\n\nIn conclusion, corn dogs are a popular snack or fast food item due to their convenience, taste, affordability, nostalgia, and versatility, making them an enjoyable and fun food option for a wide range of people." - } - ] - }, - { - "id": "000000475567", - "image": "000000475567.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many stuffed animals are visible in the image?" - }, - { - "from": "gpt", - "value": "There are two stuffed animals visible in the image." - }, - { - "from": "human", - "value": "Can you describe the stuffed animals in the image?" - }, - { - "from": "gpt", - "value": "There is a blue stuffed animal sitting next to a brown teddy bear. The teddy bear is wearing a pink hat and shirt. These stuffed animals are seated next to each other." - }, - { - "from": "human", - "value": "Where are the stuffed animals positioned in relation to a bed or crib?" - }, - { - "from": "gpt", - "value": "The two stuffed animals are positioned next to a bed or a baby crib, specifically at the foot of the bed." - }, - { - "from": "human", - "value": "What additional item can be found in the image along with the stuffed animals?" - }, - { - "from": "gpt", - "value": "There is an announcement or a birth certificate behind the stuffed animals in the image, adding significance to their presence." - }, - { - "from": "human", - "value": "What could be the purpose behind placing the stuffed animals next to the announcement or birth certificate?" - }, - { - "from": "gpt", - "value": "The purpose behind placing the stuffed animals next to the announcement or birth certificate could be to celebrate a recent birth or milestone, like a baby shower, a child's birthday, or a birth announcement. Stuffed animals, particularly teddy bears, are popular and recognizable symbols of childhood and innocence. Including them in a photograph with a document like an announcement, birth certificate, or baby-related event adds a personal, sentimental, and visually appealing touch that conveys a sense of warmth, love, and care, making the image more meaningful and memorable. Moreover, the stuffed animals contribute to the creation of a pleasant and inviting atmosphere in the child's room, adding a sense of comfort and companionship as the child grows." - } - ] - }, - { - "id": "000000309615", - "image": "000000309615.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat kind of vehicles can be seen on the busy city street?" - }, - { - "from": "gpt", - "value": "On the busy city street, there are cars and scooters that can be seen." - }, - { - "from": "human", - "value": "What is a significant feature of the stores on the street?" - }, - { - "from": "gpt", - "value": "A significant feature of the stores on the street is that there are many of them at this street and corner, creating a lively and bustling atmosphere." 
- }, - { - "from": "human", - "value": "How would you describe the overall feel of this city street?" - }, - { - "from": "gpt", - "value": "The overall feel of this city street is vibrant and bustling, with many people moving about, various types of vehicles navigating the road, and numerous stores lining the street, attracting shoppers and visitors. The presence of parked cars also indicates that the area is a popular destination for both locals and tourists. This description suggests that the street is located in the heart of the city, where daily activities and commerce are thriving." - }, - { - "from": "human", - "value": "What elements in the image show that the city street is located in an Asian city?" - }, - { - "from": "gpt", - "value": "The presence of people and scooters in the street scene indicates that the city street is in an Asian city. Scooters are particularly popular in many Asian cities, as they are a convenient and relatively affordable mode of transportation, especially in places with heavy traffic and limited parking spaces. Additionally, certain architectural and cultural elements, like the design of the stores, signage, or people's clothing, might provide clues that suggest that the city is situated in Asia." - } - ] - }, - { - "id": "000000386414", - "image": "000000386414.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many cats are in the image?" - }, - { - "from": "gpt", - "value": "There are two cats in the image." - }, - { - "from": "human", - "value": "Where are the cats located in the bathroom?" - }, - { - "from": "gpt", - "value": "One cat is sitting on top of the bathroom sink, and the other cat is sitting on the counter next to the sink." - }, - { - "from": "human", - "value": "What color are the cats?" - }, - { - "from": "gpt", - "value": "The cats are brown and white in color." - }, - { - "from": "human", - "value": "What could be the possible reasons for the cats being in the bathroom?" - }, - { - "from": "gpt", - "value": "Cats can be curious creatures, and there may be several reasons for them to be in the bathroom.\n\n1. Curiosity: Cats are intrigued by new or unfamiliar environments, and they might be exploring the bathroom to satisfy their natural curiosity.\n\n2. Comfort: Cats often seek out cozy and comfortable spots to rest. The bathroom sink or counter may provide a comfortable and secure place for them to lounge, particularly if the sink is cool and smooth, which can be appealing to cats.\n\n3. Attention: Cats may be seeking attention from their owners or companions. Sitting on the bathroom counter or in the sink may be a way for them to make their presence known and be close to their favorite humans.\n\n4. Water: Some cats are drawn to water sources, such as dripping faucets or water in the sink. They might find the sound or sight of water interesting or be seeking water to drink.\n \nFinally, every cat has its personality, and their reasons for being in the bathroom can differ. The two brown and white cats in the image may have found the bathroom sink and counter to be the perfect place to hang out together or to observe what their human is up to." - } - ] - }, - { - "id": "000000074861", - "image": "000000074861.jpg", - "conversations": [ - { - "from": "human", - "value": "How many trains are visible in the image?\n" - }, - { - "from": "gpt", - "value": "There are multiple trains visible in the image parked at different platforms." - }, - { - "from": "human", - "value": "Are there people visible at the train station? 
What are they doing?" - }, - { - "from": "gpt", - "value": "Yes, there are several people visible at the train station. Some of them are near the trains, while others are outside a train, saying goodbye to each other." - }, - { - "from": "human", - "value": "What can you say about the appearance of the trains in the photo?" - }, - { - "from": "gpt", - "value": "The trains in the image are very pretty, indicating that they might have an attractive, colorful design, or perhaps feature unique or eye-catching details, adding aesthetic appeal to the train station." - }, - { - "from": "human", - "value": "Are there any other notable features of the train station?" - }, - { - "from": "gpt", - "value": "The train station features multiple platforms, which accommodate the different trains parked there. This suggests that the station is well-organized, facilitating smooth and efficient transit services for passengers." - }, - { - "from": "human", - "value": "What do train stations generally provide for passengers in terms of amenities or services?" - }, - { - "from": "gpt", - "value": "Train stations offer various amenities and services to make the passengers' experience more comfortable and convenient. Some of these amenities may include:\n\n1. Ticket offices or ticket vending machines: This is where passengers can purchase tickets for their journeys or get information about various train routes, schedules, and fares.\n\n2. Waiting areas: Train stations usually have seating areas where passengers can sit and wait for their trains to arrive. These areas often feature benches or chairs, along with lighting and announcements or display screens to inform passengers about train arrivals and departures.\n\n3. Restrooms: Train stations typically have restroom facilities available for passengers to use, ensuring their comfort and hygiene needs are met during their time at the station.\n\n4. Food and beverage options: Many train stations have cafés, restaurants, or food vendors that offer a variety of food and drink options for passengers to enjoy while they wait for their trains.\n\n5. Retail stores: Some larger train stations also feature retail stores, where passengers can shop for various items, such as clothing, books, or souvenirs.\n\n6. Information kiosks and customer service counters: These areas help passengers with any questions they might have about train schedules, ticketing, or station facilities, guiding them and ensuring a smooth travel experience.\n\n7. Luggage storage facilities: Some train stations offer luggage storage services, allowing passengers to safely store their bags while they travel, explore the local area, or wait for their train.\n\n8. Accessibility features: Train stations are usually equipped with ramps, elevators, or escalators to accommodate passengers with mobility challenges, making it easier for them to access different platforms and areas within the station.\n\nIn conclusion, train stations provide various amenities and services to cater to the diverse needs of passengers, ensuring a comfortable and convenient experience for those using the transit system." - } - ] - }, - { - "id": "000000194022", - "image": "000000194022.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the man wearing in the image?" - }, - { - "from": "gpt", - "value": "The man in the image is wearing a black and red jacket." - }, - { - "from": "human", - "value": "What is the man doing with the cow?" 
- }, - { - "from": "gpt", - "value": "The man is standing beside the cow with his arm on it or holding onto it, demonstrating a sense of connection or ownership." - }, - { - "from": "human", - "value": "What type of animal is the man interacting with?" - }, - { - "from": "gpt", - "value": "The man is interacting with a large cow or bull." - }, - { - "from": "human", - "value": "Where is the man standing in the image?" - }, - { - "from": "gpt", - "value": "The man is standing in a barn next to a cow, suggesting a rural or agricultural setting." - }, - { - "from": "human", - "value": "Why might the man be standing next to the cow or bull in the barn?" - }, - { - "from": "gpt", - "value": "There could be several reasons for the man to be standing next to the cow or bull in the barn. Some of the potential reasons include:\n\n1. An Owner or Farmer: The man might be the owner of the cow or bull, or a farmer responsible for tending to and raising these animals. Farmers often interact closely with their livestock, monitoring their health and ensuring they are safe, well-fed, and comfortable.\n\n2. Veterinarian or Animal Health Professional: The man could be a veterinarian or animal health professional checking on the cow's well-being. In this scenario, he might be performing a routine examination, administering treatment, or providing care to the animal.\n\n3. Visitor or Tourist: The man might be a visitor or tourist at a farm or ranch. Such locations sometimes offer educational tours and hands-on experiences for visitors to learn about farming practices and interact with animals up close.\n\nIn any case, the man's interaction with the cow or bull in the barn demonstrates his interest in the animal and suggests a connection, be it professional or personal, to the animal and its environment." - } - ] - }, - { - "id": "000000059335", - "image": "000000059335.jpg", - "conversations": [ - { - "from": "human", - "value": "What are the main components of the bathroom?\n" - }, - { - "from": "gpt", - "value": "The main components of the bathroom include a stand-up shower, toilet, sink, shelves, built-in storage, a bench, and a window." - }, - { - "from": "human", - "value": "Where is the shower located in the bathroom?" - }, - { - "from": "gpt", - "value": "The shower is located in the corner of the bathroom." - }, - { - "from": "human", - "value": "What kind of storage is available in the bathroom?" - }, - { - "from": "gpt", - "value": "The bathroom has built-in storage in the form of shelves, which provides space for storing toiletries, towels, and other bathroom essentials." - }, - { - "from": "human", - "value": "Is there any seating available in the bathroom?" - }, - { - "from": "gpt", - "value": "Yes, there is a bench in the bathroom, which can be used for seating or placing items while using the bathroom." - }, - { - "from": "human", - "value": "Can you provide any tips for organizing or enhancing the storage space in a bathroom like the one in the image?" - }, - { - "from": "gpt", - "value": "To maximize storage space and enhance the organization in a bathroom like the one in the image, you can consider the following tips:\n\n1. Utilize vertical space: Install additional floating shelves or a wall-mounted cabinet above the toilet or sink to store extra towels, toiletries, and other necessities. This helps to maximize the available vertical space without causing clutter on the countertops.\n\n2. 
Opt for multi-purpose storage: Choose storage solutions that serve multiple purposes, such as a bench with storage compartments beneath it or a mirror with built-in storage behind it. This can help to create additional storage space while maintaining a clean and functional bathroom environment.\n\n3. Use containers and organizers: Keep your bathroom items neatly organized by using containers, such as clear plastic bins or decorative baskets, to store similar items together. Utilize drawer dividers, trays, or small bins to keep smaller items organized within your built-in storage or shelves.\n\n4. Add hooks: Install hooks or towel racks on the wall or behind the bathroom door to hang towels, bathrobes, or clothes. This helps to free up space on shelves and keeps your bathroom tidy.\n\n5. Keep frequently used items within reach: Arrange your bathroom items so that the most commonly used items are easily accessible. This can help streamline your daily routine and ensure a more efficient use of the available storage space.\n\nBy incorporating these tips, you can maximize the storage space and organization in your bathroom, making the space more functional and enjoyable." - } - ] - }, - { - "id": "000000410908", - "image": "000000410908.jpg", - "conversations": [ - { - "from": "human", - "value": "How many bananas can be seen on the table?\n" - }, - { - "from": "gpt", - "value": "There are two bananas sitting on top of the table." - }, - { - "from": "human", - "value": "What are the other objects on the table besides the bananas?" - }, - { - "from": "gpt", - "value": "Other objects on the table include nuts, oats, seeds, and a glass." - }, - { - "from": "human", - "value": "What is the surface of the table made of?" - }, - { - "from": "gpt", - "value": "The surface of the table is made of wood." - }, - { - "from": "human", - "value": "What could be a possible context or setting for this image?" - }, - { - "from": "gpt", - "value": "The image of the two bananas, nuts, oats, seeds, and a glass on a wooden table could be associated with a healthy meal preparation or a breakfast setting. The presence of various nutritious items suggests that the person who arranged these objects might be focusing on consuming a balanced and wholesome diet. \n\nThe arrangement of these objects might also imply that the person is in the process of making a smoothie or a healthy snack, as bananas, nuts, oats, and seeds are common ingredients in such dishes. A glass in the scene further supports the possibility of mixing these ingredients to create a smoothie. \n\nAnother possible context for this image could be a still life photography setup or an artistic composition. In this case, the objects have been carefully placed on the table to create a visually appealing scene, with a focus on natural light, shadows, textures, and colors.\n\nIn conclusion, the image of two bananas and various objects on a wooden table could be interpreted as a setting depicting healthy meal preparation, breakfast, or an artistic arrangement for photography purposes." - } - ] - }, - { - "id": "000000438698", - "image": "000000438698.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the gender of the person crossing the street in the image?\n" - }, - { - "from": "gpt", - "value": "The gender of the person crossing the street in the image is male." - }, - { - "from": "human", - "value": "What is the content of the image in terms of color?" - }, - { - "from": "gpt", - "value": "The image is black and white." 
- }, - { - "from": "human", - "value": "What is the person carrying while crossing the street?" - }, - { - "from": "gpt", - "value": "The person is carrying a shopping bag while crossing the street." - }, - { - "from": "human", - "value": "Is there any street sign or symbol pointing at the person in the image?" - }, - { - "from": "gpt", - "value": "Yes, there is an arrow pointing at the man in the image. He is standing under an arrow street sign while carrying groceries." - }, - { - "from": "human", - "value": "Is the person crossing the street at a crosswalk or at an unmarked location?" - }, - { - "from": "gpt", - "value": "The person is crossing the street at a crosswalk." - } - ] - }, - { - "id": "000000192497", - "image": "000000192497.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat activity is the woman in the image engaged in?" - }, - { - "from": "gpt", - "value": "The woman in the image is engaged in making cookies. She's scooping cookie dough from a spoon onto a cookie sheet in a kitchen." - }, - { - "from": "human", - "value": "What type of utensils is the woman using?" - }, - { - "from": "gpt", - "value": "The woman is using two utensils - a spoon and a cookie sheet - to make cookies." - }, - { - "from": "human", - "value": "Is the woman young or old?" - }, - { - "from": "gpt", - "value": "The woman in the image is described as a young woman." - }, - { - "from": "human", - "value": "Is there any indication of the purpose of the cookies she is making?" - }, - { - "from": "gpt", - "value": "There is no direct indication in the image of the specific purpose of the cookies, but it is suggested that she might be preparing food for a party." - }, - { - "from": "human", - "value": "Why is it important to use a cookie sheet when making cookies?" - }, - { - "from": "gpt", - "value": "Using a cookie sheet is essential when making cookies for several reasons:\n\n1. Even baking: Cookie sheets ensure even distribution of heat, leading to uniform baking results for all the cookies placed on the sheet. This minimizes the risk of some cookies being overbaked or underbaked.\n\n2. Easy handling: Cookie sheets provide a flat and stable surface for placing cookie dough, making it easier to slide the sheet in and out of the oven without accidentally disturbing the dough. This ensures that the cookies maintain their shape during baking.\n\n3. Non-stick surfaces: Many cookie sheets have non-stick surfaces or can be lined with parchment paper or silicone baking mats to prevent cookies from sticking or burning. This makes cookie removal and cleanup significantly easier.\n\n4. Reusable: Cookie sheets can be used multiple times for preparing large batches of cookies or other baked goods, making them a convenient and cost-effective option for home bakers.\n\nIn summary, using a cookie sheet when making cookies is essential to achieve consistent baking results, a seamless baking process, and to provide an easy way to handle and remove cookies from the oven." - } - ] - }, - { - "id": "000000412004", - "image": "000000412004.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat activity is the person engaged in?" - }, - { - "from": "gpt", - "value": "The person is engaged in flying a kite." - }, - { - "from": "human", - "value": "What is the gender of the person flying the kite?" - }, - { - "from": "gpt", - "value": "The person flying the kite is a woman." - }, - { - "from": "human", - "value": "What is the setting of the image, and what is the weather like?" 
- }, - { - "from": "gpt", - "value": "The setting of the image is a beautiful green field, and the weather appears to be cloudy." - }, - { - "from": "human", - "value": "How high is the kite flying in the sky?" - }, - { - "from": "gpt", - "value": "The kite is flying very high in the sky." - }, - { - "from": "human", - "value": "What color is the kite in the image?" - }, - { - "from": "gpt", - "value": "The image does not provide information about the color of the kite." - } - ] - }, - { - "id": "000000103549", - "image": "000000103549.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the man doing in the image related to the horse?" - }, - { - "from": "gpt", - "value": "The man is sitting on a fake horse, presumably participating in a parade." - }, - { - "from": "human", - "value": "What event seems to be taking place in the image?" - }, - { - "from": "gpt", - "value": "A parade seems to be taking place, with the man on the fake horse being one of the participants, and people watching the event." - }, - { - "from": "human", - "value": "What is the color of the man's clothing?" - }, - { - "from": "gpt", - "value": "The man is wearing blue clothing." - }, - { - "from": "human", - "value": "Are there any spectators seen in the image?" - }, - { - "from": "gpt", - "value": "Yes, there are people visible in the image, watching the parade and the man on the fake horse." - }, - { - "from": "human", - "value": "Why are fake horses used in parades, and how are they typically designed?" - }, - { - "from": "gpt", - "value": "Fake horses are often used in parades for various reasons, such as:\n\n1. Tradition: Some parades may have historical or cultural significance where the inclusion of horses was a common practice in the past, but using real horses is impractical for modern-day events. In this case, fake horses can serve as a symbolic representation, paying homage to that tradition.\n\n2. Safety and practicality: Real horses can be unpredictable and may pose risks to participants and spectators. Utilizing a fake horse eliminates these concerns and allows for safer and more controlled event management. Additionally, fake horses do not require the same level of care, feeding, or grooming as real horses, making them easier and more cost-effective to include in a parade.\n\n3. Art and creativity: Fake horses can be designed in various styles, colors, and materials, allowing for artistic expression and the opportunity for parade organizers to create visually striking and memorable displays.\n\nTypically, fake horses used in parades are designed to resemble real horses in form and proportions but may be stylized, decorated, or embellished to suit the event's theme, cultural heritage, or artistic vision. Common materials used for construction include fiberglass, papier-mâché, wood, and various types of fabric or soft materials.\n\nFake horses can be mounted on wheels or frames, allowing them to be easily pulled or pushed by participants, or they can be designed with leg supports and a hollow body for a person to wear and walk in, creating the illusion of riding a horse. Regardless of the method, the goal is to achieve a convincing representation of a horse while offering safety, convenience, and an engaging visual experience for the parade attendees." 
- } - ] - }, - { - "id": "000000280599", - "image": "000000280599.jpg", - "conversations": [ - { - "from": "human", - "value": "How many animals are in the image?\n" - }, - { - "from": "gpt", - "value": "There are four animals in the image." - }, - { - "from": "human", - "value": "What kind of animals are present in the image?" - }, - { - "from": "gpt", - "value": "The animals in the image are sheep." - }, - { - "from": "human", - "value": "Are the sheep standing or laying down?" - }, - { - "from": "gpt", - "value": "The sheep are laying down, resting in the grass." - }, - { - "from": "human", - "value": "What is the condition of the field in the image?" - }, - { - "from": "gpt", - "value": "The field in the image is grass-covered, green, and well-maintained." - }, - { - "from": "human", - "value": "Is there any indication in the image about the recent activities of the sheep or their environment?" - }, - { - "from": "gpt", - "value": "Yes, the image shows freshly-shorn sheep, which indicates that they have recently been through the shearing process, where their wool has been removed for various uses such as textile production. The sheep appear to be relaxing in the green field, resting in the shade, implying that they are comfortable and content in their environment. The presence of a tree nearby provides them with some shade and adds to the natural, pastoral setting. The well-maintained grassy field further suggests that their habitat is properly cared for and suitable for the sheep's well-being." - } - ] - }, - { - "id": "000000299273", - "image": "000000299273.jpg", - "conversations": [ - { - "from": "human", - "value": "How many couches are there in the living room?\n" - }, - { - "from": "gpt", - "value": "There are two couches in the living room." - }, - { - "from": "human", - "value": "What material is the coffee table made of?" - }, - { - "from": "gpt", - "value": "The coffee table in the living room is made of glass." - }, - { - "from": "human", - "value": "What color are the sofas in the living room?" - }, - { - "from": "gpt", - "value": "The sofas in the living room are red." - }, - { - "from": "human", - "value": "Is there a television in the living room?" - }, - { - "from": "gpt", - "value": "Yes, there is a television in the living room." - }, - { - "from": "human", - "value": "What is the person in the living room doing?" - }, - { - "from": "gpt", - "value": "The person in the living room is bending down by the television." - } - ] - }, - { - "id": "000000063912", - "image": "000000063912.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the weather condition in the image?\n" - }, - { - "from": "gpt", - "value": "The weather condition in the image is rainy." - }, - { - "from": "human", - "value": "What is the person doing on the sidewalk?" - }, - { - "from": "gpt", - "value": "The person is walking on the sidewalk while holding a black umbrella to protect themselves from the rain." - }, - { - "from": "human", - "value": "What color are the person's clothing and umbrella?" - }, - { - "from": "gpt", - "value": "The person is wearing black clothing, and they are using a black umbrella." - }, - { - "from": "human", - "value": "Is the image taken from a ground perspective or a higher vantage point, such as from a window?" - }, - { - "from": "gpt", - "value": "The image is taken from a higher vantage point, such as from a window, as it overlooks the person walking in the rain on the sidewalk." 
- }, - { - "from": "human", - "value": "How many people are visible in the image?" - }, - { - "from": "gpt", - "value": "There is a single person visible in the image, walking on the sidewalk and holding a black umbrella." - } - ] - }, - { - "id": "000000009542", - "image": "000000009542.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the woman doing in the image?\n" - }, - { - "from": "gpt", - "value": "The woman in the image is walking with an open umbrella, presumably to protect herself from the rain." - }, - { - "from": "human", - "value": "Does the woman have any other items with her?" - }, - { - "from": "gpt", - "value": "Yes, the woman is also wearing a backpack while walking with her umbrella." - }, - { - "from": "human", - "value": "Is the person walking in the rain or under clear skies?" - }, - { - "from": "gpt", - "value": "The person is walking in the rain, as they are using an open umbrella which indicates that it is probably raining." - }, - { - "from": "human", - "value": "Where is the woman walking? What is the background like?" - }, - { - "from": "gpt", - "value": "The woman is walking in front of a busy road. This suggests that she is in an urban or developed area with traffic and possibly other pedestrians around her." - } - ] - }, - { - "id": "000000359375", - "image": "000000359375.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat are the main components of the bathroom in the image?" - }, - { - "from": "gpt", - "value": "The main components of the bathroom in the image include a sink, a shower stall, and a mirror." - }, - { - "from": "human", - "value": "Is the bathroom spacious or small?" - }, - { - "from": "gpt", - "value": "The bathroom in the image is extremely small, with minimal things in it." - }, - { - "from": "human", - "value": "Is there any storage for toiletries visible in the bathroom?" - }, - { - "from": "gpt", - "value": "Yes, there is a basket of toiletries visible in the bathroom, likely meant for storage and easy access to essential items." - }, - { - "from": "human", - "value": "What color is the bathroom sink?" - }, - { - "from": "gpt", - "value": "The bathroom sink is white." - }, - { - "from": "human", - "value": "Can you discuss some challenges and advantages of having a small bathroom?" - }, - { - "from": "gpt", - "value": "Having a small bathroom can present both challenges and advantages. Here are some points to consider:\n\nChallenges:\n1. Limited space: A small bathroom provides limited space for maneuvering, making it difficult to move around comfortably or share the space with another person at the same time.\n2. Storage: Small bathrooms often have limited storage options, which can make it challenging to keep toiletries, towels, and other necessities organized and easily accessible.\n3. Clutter: With minimal space, small bathrooms can quickly become cluttered if not regularly maintained, giving a cramped and messy appearance.\n4. Functionality: Fitting all necessary fixtures and components, such as a sink, toilet, shower or bathtub, and storage, within a confined space may hinder the bathroom's overall functionality and aesthetics.\n\nAdvantages:\n1. Cost-effectiveness: Smaller bathrooms are generally less expensive to build, renovate, or maintain since they require fewer materials and lower maintenance costs for fixtures, plumbing, and electricity.\n2. Easy cleaning: Due to the limited space, small bathrooms can be easier and quicker to clean, as there are fewer surfaces to manage.\n3. 
Minimalism: A small bathroom encourages minimalism and thoughtful design choices, which can lead to a stylish and functional space without excess clutter or unnecessary accessories.\n4. Environmental impact: A smaller bathroom typically has a lower environmental impact, as it requires fewer resources and generates less waste, both during construction and ongoing upkeep.\n\nIn conclusion, small bathrooms can present challenges such as limited space, storage, and functionality, but they also offer advantages such as cost-effectiveness, easy cleaning, minimalism, and reduced environmental impact. The key to making a small bathroom work effectively is careful planning and design, so that it maximizes its potential while accommodating the occupants' needs and preferences." - } - ] - }, - { - "id": "000000434354", - "image": "000000434354.jpg", - "conversations": [ - { - "from": "human", - "value": "\nIs there a Christmas tree in the living room?" - }, - { - "from": "gpt", - "value": "Yes, there is a Christmas tree in the living room where the group of people is gathered." - }, - { - "from": "human", - "value": "What is the main activity of the group of people in the image?" - }, - { - "from": "gpt", - "value": "The main activity of the group of people in the image is opening gifts in the living room." - }, - { - "from": "human", - "value": "Are there any chairs visible in the living room where the people are gathered?" - }, - { - "from": "gpt", - "value": "Yes, there are chairs in the living room where the group of people is gathered near the Christmas tree." - }, - { - "from": "human", - "value": "Is there a television or monitor in the room?" - }, - { - "from": "gpt", - "value": "Yes, there is a giant television or monitor in the room where the group of people is gathered." - }, - { - "from": "human", - "value": "What is the significance of opening Christmas presents with friends and family?" - }, - { - "from": "gpt", - "value": "Opening Christmas presents with friends and family is a cherished tradition for many people. It carries significance in several ways:\n\n1. Strengthening bonds and relationships: Sharing moments of joy, surprise, and appreciation while opening gifts helps reinforce the emotional connections between friends and family members. This shared experience creates lasting memories and strengthens relationships.\n\n2. Expressing gratitude and thoughtfulness: Giving and receiving presents during Christmas serves as a way to show gratitude, love, and care for one another. The act of choosing, wrapping, and presenting gifts demonstrates thoughtfulness and an understanding of the recipient's tastes and preferences.\n\n3. Celebrating the spirit of the season: The exchange of gifts is closely associated with the spirit of Christmas, which emphasizes the values of generosity, kindness, and goodwill. By giving and receiving gifts, people come together to celebrate these values and create a festive atmosphere.\n\n4. Encouraging a sense of community and togetherness: The practice of opening presents together allows friends and family to set aside their daily routines and stresses to focus on spending quality time together. This fosters a sense of community, belonging, and togetherness among the group.\n\nIn conclusion, opening Christmas presents with friends and family is significant because it promotes strong bonds, gratitude, thoughtfulness, celebration of the season's spirit, and fosters a sense of community and togetherness. 
These factors contribute to making the tradition an emotionally and socially rewarding experience for those involved." - } - ] - }, - { - "id": "000000548184", - "image": "000000548184.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the small girl doing in the image?\n" - }, - { - "from": "gpt", - "value": "The small girl in the image is pulling a piece of luggage or a suitcase." - }, - { - "from": "human", - "value": "Is the girl near a car?" - }, - { - "from": "gpt", - "value": "Yes, the small girl is pulling the luggage bag near a car." - }, - { - "from": "human", - "value": "What type of surface is the girl on while pulling the suitcase?" - }, - { - "from": "gpt", - "value": "The girl is dragging the suitcase up a gravel driveway." - }, - { - "from": "human", - "value": "What can we infer about the context of the image?" - }, - { - "from": "gpt", - "value": "From the image, we can infer that the little girl and her family are likely on vacation or returning from a trip. Her action of pulling the suitcase after a car ride suggests that they have just arrived at their destination or returned home. The presence of the suitcase indicates that she and her family have been traveling, and the gravel driveway could be the location of their vacation spot, such as a cabin or a rental home. The small girl seems to be taking responsibility for her own luggage, perhaps showing independence or excitement about the vacation." - } - ] - }, - { - "id": "000000154708", - "image": "000000154708.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the main activity shown in the image?\n" - }, - { - "from": "gpt", - "value": "The main activity in the image is a man surfing on a wave in the ocean." - }, - { - "from": "human", - "value": "How large is the wave that the man is surfing?" - }, - { - "from": "gpt", - "value": "The wave that the man is surfing is moderate in size, not particularly small or extremely large." - }, - { - "from": "human", - "value": "Where is the man surfing - near the beach or in the middle of the ocean?" - }, - { - "from": "gpt", - "value": "The man is surfing at the beach, as the wave he is riding is likely to be close to shore rather than in the middle of the ocean." - }, - { - "from": "human", - "value": "What is the surfer's body position like on the surfboard?" - }, - { - "from": "gpt", - "value": "The surfer is standing on the surfboard while riding the wave, maintaining balance and control over the board." - }, - { - "from": "human", - "value": "Can you explain the basics of surfing and the skills required?" - }, - { - "from": "gpt", - "value": "Surfing is an outdoor water sport where individuals ride on the forward face of a moving wave, such as those found near beaches, using a surfboard. The primary goal of surfing is to glide across the surface of the water while maintaining balance and control. It requires a combination of physical strength, agility, balance, timing, and an understanding of how waves and ocean currents behave.\n\nSome key surfing skills and techniques include:\n\n1. Paddling: Before catching a wave, surfers need to paddle out using their arms to propel the surfboard through the water. This requires upper body strength and endurance.\n\n2. Duck diving: To reach the area where suitable waves break, surfers must pass through incoming waves. Duck diving involves pushing the nose of the surfboard underwater, diving beneath the breaking wave, and resurfacing on the other side to continue paddling out.\n\n3. 
Catching a wave: Timing is crucial in catching a wave. As a suitable wave approaches, surfers need to paddle in the same direction as the wave to gain enough speed to catch and ride it.\n\n4. Popping up: Once a surfer catches a wave, they must quickly transition from a prone position (lying on the surfboard) to a standing position. This is called popping up and requires strength, agility, and balance.\n\n5. Riding the wave: While riding the wave, surfers use their legs to shift weight and maintain balance on the surfboard. They also use their arms for support, guidance, and making adjustments to their direction and speed.\n\n6. Turning and maneuvering: While riding a wave, surfers need to perform various turns and maneuvers to stay on the wave and avoid wiping out. This requires precise control, technique, and an understanding of wave dynamics.\n\nSurfing also involves knowledge about ocean safety, including rip currents, tides, and local hazards, such as reefs or rocks. Moreover, surfing etiquette and understanding the unwritten rules and priorities on the water are essential to ensure a positive experience for everyone in the lineup." - } - ] - }, - { - "id": "000000242008", - "image": "000000242008.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the relationship between the man and the child in the image?\n" - }, - { - "from": "gpt", - "value": "The man in the image is the father of the small child." - }, - { - "from": "human", - "value": "Where is the child positioned in relation to the motorcycle?" - }, - { - "from": "gpt", - "value": "The child is sitting on the motorcycle." - }, - { - "from": "human", - "value": "What is the father doing in the image?" - }, - { - "from": "gpt", - "value": "The father is standing next to the motorcycle, posing with his small son who is sitting on the bike." - }, - { - "from": "human", - "value": "Is the motorcycle parked or in motion?" - }, - { - "from": "gpt", - "value": "The motorcycle is parked." - }, - { - "from": "human", - "value": "Can you discuss the importance of safety precautions when children are around motorcycles?" - }, - { - "from": "gpt", - "value": "Safety is a paramount concern when children are around motorcycles, as these vehicles have the potential to pose significant risks if necessary precautions are not taken. It is essential for parents and guardians to be aware of safety guidelines and ensure that children are properly educated about responsible behavior around motorcycles. Below are a few safety precautions to consider:\n\n1. Supervision: Children should always be supervised when around motorcycles, especially if they are allowed to sit on or touch the bike. Young children might be tempted to play with or operate the motorcycle, which could lead to accidents or injuries.\n\n2. Safety gear: When riding or sitting on a motorcycle, even for a short distance, it is essential for both the child and the adult to wear appropriate safety gear. This includes helmets, gloves, protective clothing, and sturdy footwear. Helmets can reduce the risk of head injuries, saving lives in case of accidents.\n\n3. Educate children: Teaching children about motorcycle safety from a young age can help them develop an understanding of responsible behavior around motorbikes. Discussing topics such as road safety, the importance of wearing protective gear, and how to approach a running motorcycle are essential aspects of motorcycle safety education.\n\n4. 
Child-sized motorcycle equipment: If a child is allowed to ride a motorcycle, it is crucial to provide them with child-sized safety gear and equipment, such as smaller helmets, gloves, and boots. Properly fitting safety gear provides better protection in case of an accident.\n\n5. Safe environment: Children should only be allowed to ride or touch motorcycles in safe, controlled environments, such as private properties or designated safe zones. Avoid exposing children to potentially hazardous situations, such as busy roads, uneven terrain, or slippery surfaces.\n\nIn conclusion, safety precautions are vital when children are around motorcycles to minimize the risk of accidents and injuries. By being vigilant, ensuring proper supervision, providing appropriate safety gear, educating children about motorcycle safety, and ensuring a safe environment, parents and guardians can create a safer space for children to interact with motorcycles." - } - ] - }, - { - "id": "000000097296", - "image": "000000097296.jpg", - "conversations": [ - { - "from": "human", - "value": "What action is the baseball player performing in the image?\n" - }, - { - "from": "gpt", - "value": "The baseball player is swinging at a pitch, about to hit the ball thrown by the pitcher with a baseball bat." - }, - { - "from": "human", - "value": "What object is the baseball player using to hit the ball?" - }, - { - "from": "gpt", - "value": "The baseball player is using a baseball bat to hit the ball." - }, - { - "from": "human", - "value": "Is the baseball player hitting the ball during a practice session or a game?" - }, - { - "from": "gpt", - "value": "The image shows the baseball player hitting the ball during a game." - }, - { - "from": "human", - "value": "Can you describe the moment captured in the image?" - }, - { - "from": "gpt", - "value": "The moment captured in the image is right before the batter connects the bat and the ball. This is a critical instant in the baseball game, as the batter's skill and timing in making contact with the ball can determine the outcome of the play. It could result in a home run, a base hit, a strike, or an out depending on the batter's swing and the trajectory of the ball after being hit." - }, - { - "from": "human", - "value": "How important is the batter's skill and timing in hitting the ball successfully?" - }, - { - "from": "gpt", - "value": "The batter's skill and timing play a crucial role in successfully hitting the ball during a baseball game. The ability to hit a fastball, curveball, or other types of pitches requires excellent hand-eye coordination, strength, and proper technique. \n\nThe timing aspect is critical because the batter needs to accurately judge the speed, direction, and potential movement of the ball in a split second. Adjusting the swing accordingly helps the batter make solid contact with the ball and increase the chances of sending it deep into the field or even hitting a home run. \n\nA successful batter must have keen reflexes, an understanding of the pitcher's throwing patterns, and the ability to make quick adjustments during the swing. Together, these elements help the batter improve their chances of connecting the bat with the ball effectively and achieving desirable results, such as scoring runs or advancing teammates around the bases. On the other hand, any shortcomings in skill or timing may lead to unfavorable outcomes, such as strikes and outs, which hamper their team's overall performance." 
- } - ] - }, - { - "id": "000000078562", - "image": "000000078562.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat color is the cat in the image?" - }, - { - "from": "gpt", - "value": "The cat in the image is white." - }, - { - "from": "human", - "value": "Where is the cat positioned in relation to the keyboard?" - }, - { - "from": "gpt", - "value": "The white cat is laying next to the keyboard on a desk." - }, - { - "from": "human", - "value": "What type of surface is the cat laying on?" - }, - { - "from": "gpt", - "value": "The cat is laying on a wood desk." - }, - { - "from": "human", - "value": "Is the cat sitting or laying down?" - }, - { - "from": "gpt", - "value": "The cat is laying down." - }, - { - "from": "human", - "value": "What is the likely reason for the cat to be next to the keyboard?" - }, - { - "from": "gpt", - "value": "While it's difficult to determine the exact reason for the cat's presence next to the keyboard, it's quite common for cats to seek the company of their owners or to be attracted to warm or comfortable surfaces. The cat may be lying next to the keyboard because it is near its owner who is likely using the computer, or it may simply find the desk's surface to be a comfortable spot to rest. Additionally, cats are often intrigued by the movement of objects like computer mice or the sound of keyboard presses, which could also pique their interest and cause them to lay beside the keyboard." - } - ] - }, - { - "id": "000000152764", - "image": "000000152764.jpg", - "conversations": [ - { - "from": "human", - "value": "How are the motorcycles arranged in the image?\n" - }, - { - "from": "gpt", - "value": "The motorcycles are arranged in a row, parked on the street." - }, - { - "from": "human", - "value": "What color are the motorcycles in the image?" - }, - { - "from": "gpt", - "value": "The motorcycles in the image are black." - }, - { - "from": "human", - "value": "What are the people doing near the motorcycles?" - }, - { - "from": "gpt", - "value": "The people, likely the motorcycle riders or owners, are congregating or milling about in the street near the parked motorcycles, possibly socializing, discussing their bikes, or preparing for a ride." - }, - { - "from": "human", - "value": "Are the motorcycles modern or vintage in style?" - }, - { - "from": "gpt", - "value": "The motorcycles in the image are described as vintage, which means they are older models or have a classic design." - }, - { - "from": "human", - "value": "What do the owners of the motorcycles appear to be wearing?" - }, - { - "from": "gpt", - "value": "The owners of the motorcycles appear to be wearing leather attire, which is often associated with motorcycle culture and serves as a protective gear for riders." - } - ] - }, - { - "id": "000000451597", - "image": "000000451597.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat items are present in the bowl of food?" - }, - { - "from": "gpt", - "value": "The bowl of food contains granola, bananas, strawberries, and raspberries. There is also a container of yogurt next to the bowl." - }, - { - "from": "human", - "value": "What type of dishware is used to hold the fruit mix and granola?" - }, - { - "from": "gpt", - "value": "The dishware used to hold the fruit mix and granola is a white plate, and the fruit mix and granola are served in a bowl placed on the plate." - }, - { - "from": "human", - "value": "Is there any utensil included in the image?" 
- }, - { - "from": "gpt", - "value": "Yes, there is a spoon included with the bowl of granola and fruit in the image." - }, - { - "from": "human", - "value": "What type of meal would this bowl of granola and fruit be suitable for?" - }, - { - "from": "gpt", - "value": "This bowl of granola and fruit would be suitable for a healthy and nutritious breakfast or snack. Granola is rich in fiber, and the addition of the mixed fruits — including bananas, strawberries, and raspberries — provides vitamins and antioxidants. The yogurt provides a good source of protein, calcium, and probiotics, which promote a healthy gut. Combined, these ingredients make for a well-rounded and delicious meal that can be enjoyed in the morning or as a wholesome snack during the day." - } - ] - }, - { - "id": "000000503781", - "image": "000000503781.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the bird doing in the image?\n" - }, - { - "from": "gpt", - "value": "The bird is perched on a tree branch, sitting and resting among the green leaves surrounding it." - }, - { - "from": "human", - "value": "What is the visibility of the bird in the image?" - }, - { - "from": "gpt", - "value": "The bird is visible but appears to be somewhat obscured by blurry leaves around it, which creates a somewhat ruffled appearance." - }, - { - "from": "human", - "value": "Can you describe the general atmosphere or setting of the image?" - }, - { - "from": "gpt", - "value": "The image displays a serene and peaceful atmosphere characterized by the bird quietly resting on a branch. With the green leaves surrounding the bird, it suggests that the setting is likely in a natural environment, such as a park or a forest. The overall scene evokes a sense of tranquility and harmony between the bird and its surroundings." - }, - { - "from": "human", - "value": "Why might the bird be resting on the branch?" - }, - { - "from": "gpt", - "value": "Birds typically perch and rest on branches for various reasons. Some possible explanations for the bird resting on the branch in this image could be:\n\n1. Safety and predator avoidance: Birds often rest up high on branches to avoid predators and observe their surroundings from a more secure vantage point.\n\n2. Conserving energy: After flying, foraging, or other activities, birds may perch on branches to rest and conserve energy for their next flight or activity. Resting also helps birds maintain their body temperature and recover from exhaustion.\n\n3. Grooming: Perching on a branch provides birds the opportunity to groom their feathers, which is an essential part of maintaining their overall health and flight capabilities.\n\n4. Courtship and social interactions: Birds may also rest on branches as part of their courtship rituals or to engage in social interactions with other birds.\n\nIn summary, the bird in the image might be resting on the branch for safety, energy conservation, grooming, or social interactions. The serene and peaceful feeling conveyed by the scene emphasizes the bird's state of relaxation and contentment within its natural environment." - } - ] - }, - { - "id": "000000427548", - "image": "000000427548.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many computer monitors are on the desk?" - }, - { - "from": "gpt", - "value": "There are four computer monitors on the desk." - }, - { - "from": "human", - "value": "Is there a computer tower on the desk as well?" 
- }, - { - "from": "gpt", - "value": "Yes, there is a computer tower sitting together with the four monitors on the desk." - }, - { - "from": "human", - "value": "Do the computer monitors have the same screensaver?" - }, - { - "from": "gpt", - "value": "Yes, the multiple computer monitors on the desk have the same screensaver." - }, - { - "from": "human", - "value": "What type of environment does this desk setup suggest?" - }, - { - "from": "gpt", - "value": "The desk setup with four computer monitors and a tower suggests a work or office environment. A multi-monitor setup like this is often used by professionals who require extensive screen real estate for their tasks, such as graphic designers, programmers, video editors, or financial traders. Having multiple screens allows them to have multiple applications, documents, or projects open simultaneously, making it easier to multitask, navigate between applications, and be more efficient. The presence of the same screensaver on all monitors implies that the setup may be part of an organized workspace, possibly within a corporate setting where uniformity is maintained. Overall, it appears to be a productive and efficient workstation designed to handle complex or demanding tasks." - } - ] - }, - { - "id": "000000344187", - "image": "000000344187.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat sport are the people playing in the image?" - }, - { - "from": "gpt", - "value": "The people in the image are playing baseball." - }, - { - "from": "human", - "value": "What is the batter attempting to do in the image?" - }, - { - "from": "gpt", - "value": "The batter is attempting to hit the baseball with the bat during his swing." - }, - { - "from": "human", - "value": "Is the batter successful in hitting the ball?" - }, - { - "from": "gpt", - "value": "No, the batter misses the ball during his swing." - }, - { - "from": "human", - "value": "What is the role of the person standing behind the batter with the catcher's mitt?" - }, - { - "from": "gpt", - "value": "The person standing behind the batter with the catcher's mitt is playing the role of the catcher in the game of baseball. The catcher's primary responsibility is to receive and catch the pitches thrown by the pitcher towards the home plate. They are also responsible for coordinating the pitcher's strategy and keeping an eye on base runners. In the image, the catcher is prepared and positioned to catch the ball if the batter misses the swing." - }, - { - "from": "human", - "value": "What is the significance of a batter hitting the ball well during a game of baseball?" - }, - { - "from": "gpt", - "value": "In the game of baseball, the batter's main objective is to hit the ball and make contact with it in such a way that they are able to reach one or more of the bases safely, potentially scoring runs for their team. Hitting the ball well is essential for the following reasons:\n\n1. Advancing bases: Good contact with the ball allows the batter to advance to the first base or even beyond, based on the strength and angle of the hit. Better hits increase the odds of reaching multiple bases or scoring a run directly.\n \n2. Driving in runs: When there are runners on base, a well-timed and powerful hit can drive in the base runners to score runs for the team, thereby improving the team's overall score and chances of winning.\n\n3. 
Challenging the fielders: When a batter hits the ball well, they create a challenging situation for the opposing team's fielders, forcing them to react quickly, adjust their positions, or make difficult plays to prevent base advances or runs.\n\n4. Sequential hitting: Consistently hitting the ball well could lead to a string of successful plays or “rallies” for the batting team, increasing momentum and boosting the team's morale.\n\nIn summary, hitting the baseball well is crucial for advancing bases, driving in runs, challenging the opposing team's fielders, and maintaining positive momentum throughout the game. Therefore, a batter's ability to consistently make good contact with the ball is a valuable skill in the game of baseball." - } - ] - }, - { - "id": "000000444367", - "image": "000000444367.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat type of train is depicted in the image?" - }, - { - "from": "gpt", - "value": "The image shows a commuter train, which is designed for transporting passengers over short to medium distances within a metropolitan area or between nearby cities." - }, - { - "from": "human", - "value": "Can you describe the colors of the train in the image?" - }, - { - "from": "gpt", - "value": "The train in the image is red and black." - }, - { - "from": "human", - "value": "What is the current status of the train in relation to the platform?" - }, - { - "from": "gpt", - "value": "The train is leaving the station, speeding past a subway platform where passengers walk and wait." - }, - { - "from": "human", - "value": "How does the platform appear in terms of cleanliness?" - }, - { - "from": "gpt", - "value": "The platform in the image is clean, indicating that it is well-maintained and cared for by the transportation authorities." - }, - { - "from": "human", - "value": "What can you infer about the efficiency of the public transportation system from the image?" - }, - { - "from": "gpt", - "value": "Based on the image, the public transportation system seems to be efficient, as evidenced by the clean subway platform and the speeding train leaving the station. The clean platform suggests that the transportation authorities are diligent in maintaining the station, which can directly impact the overall quality and satisfaction of the riders. The train's timely departure also implies that the rail system is likely running on a schedule, allowing passengers to plan their commutes accordingly.\n\nEfficient public transportation is essential for a city, as it can alleviate traffic congestion, reduce pollution, and help residents travel more conveniently from one place to another. Furthermore, a well-organized and clean transportation system can contribute to a positive experience for the passengers and encourage more people to use public transit, ultimately leading to a more sustainable, eco-friendly city. Therefore, the image of a commuter train leaving a clean subway platform reflects a well-managed public transportation system that is beneficial to both the residents and the city itself." - } - ] - }, - { - "id": "000000114697", - "image": "000000114697.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the activity being depicted in the image?" - }, - { - "from": "gpt", - "value": "The activity depicted in the image is a young boy playing baseball in a green field. He is swinging a baseball bat, getting ready to hit a baseball at home plate." 
- }, - { - "from": "human", - "value": "What is the color of the field where the boy is playing baseball?" - }, - { - "from": "gpt", - "value": "The color of the field where the boy is playing baseball is green." - }, - { - "from": "human", - "value": "Is the baseball player holding anything, and if so, what is he holding?" - }, - { - "from": "gpt", - "value": "Yes, the baseball player is holding a baseball bat." - }, - { - "from": "human", - "value": "What stage of hitting the baseball is the young boy at in the image?" - }, - { - "from": "gpt", - "value": "In the image, the young boy is just getting ready to hit the baseball. This suggests that he is at the stage where he is swinging (or has just begun to swing) the bat, preparing to make contact with the ball." - }, - { - "from": "human", - "value": "What factors contribute to a successful baseball hit?" - }, - { - "from": "gpt", - "value": "Several factors contribute to a successful baseball hit, and mastering these factors can improve a player's batting performance:\n\n1. Proper stance and body position: A good stance involves keeping the feet shoulder-width apart and knees slightly bent. This allows for better balance, mobility, and power generation.\n\n2. Grip and hand positioning: How a player grips the bat and positions their hands can significantly impact their swing. A firm yet relaxed grip and appropriate hand positioning can help the hitter maintain control and generate optimal bat speed.\n\n3. Bat speed: An essential factor in hitting the ball hard and far, bat speed is generated through a combination of body mechanics, hip rotation, and arm strength.\n\n4. Timing: Excellent timing is crucial in hitting a baseball successfully. The player needs to begin their swing at the right moment to make solid contact with the ball.\n\n5. Swing path: An efficient and consistent swing path is vital for maximizing the chances of making contact with the ball. The hitter must keep their head down and eyes on the ball, following it all the way to the point of contact.\n\n6. Point of contact: Making contact with the ball at the optimal point in the swing is essential. The ideal point of contact can vary depending on the pitch location, swing style, and the desired outcome (such as hitting for power or contact).\n\n7. Follow-through: Once the ball is hit, the player should follow through with their swing, making sure to complete the rotation of the upper body, hips, and arms. This follow-through helps generate power and maintain balance.\n\nIn summary, a successful baseball hit requires a combination of proper stance, grip, bat speed, timing, swing path, point of contact, and follow-through. All these factors work together to improve a player's batting performance and increase their chances of hitting the ball hard and accurately." - } - ] - }, - { - "id": "000000402980", - "image": "000000402980.jpg", - "conversations": [ - { - "from": "human", - "value": "What color and type of vehicle is featured in the image?\n" - }, - { - "from": "gpt", - "value": "The image features an older model red pickup truck." - }, - { - "from": "human", - "value": "What is the condition of the truck's load bed?" - }, - { - "from": "gpt", - "value": "The truck's load bed is loaded with various items, including a large blue object." - }, - { - "from": "human", - "value": "What is the location of the parked truck?" - }, - { - "from": "gpt", - "value": "The red pickup truck is parked in the gravel, in a driveway, and positioned next to a fence." 
- }, - { - "from": "human", - "value": "Can you describe the size of the red truck?" - }, - { - "from": "gpt", - "value": "The red truck is a small-sized pickup, although the exact dimensions cannot be gleaned from the image, it is visibly smaller than a full-sized pickup truck." - }, - { - "from": "human", - "value": "Is it possible to identify the large blue object in the truck's load bed from the image?" - }, - { - "from": "gpt", - "value": "Based on the image's description, it is challenging to confidently identify the large blue object in the truck's load bed, as detailed information about its shape, material, or purpose is not provided." - } - ] - }, - { - "id": "000000023951", - "image": "000000023951.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the main activity of the person in the image?" - }, - { - "from": "gpt", - "value": "The main activity of the person in the image is flying a kite." - }, - { - "from": "human", - "value": "What colors are present on the kite being flown in the image?" - }, - { - "from": "gpt", - "value": "The kite being flown in the image has blue and red colors." - }, - { - "from": "human", - "value": "What are the weather conditions in the image?" - }, - { - "from": "gpt", - "value": "The weather conditions in the image are overcast, with a cloudy sky in the background." - }, - { - "from": "human", - "value": "Can you describe the setting where the person is flying the kite?" - }, - { - "from": "gpt", - "value": "The person is flying the kite in an open field with a sky in the background. The space is likely chosen for its abundance of open space and minimal obstructions, making it an ideal location for kite flying." - }, - { - "from": "human", - "value": "What factors can impact the kite flying experience?" - }, - { - "from": "gpt", - "value": "Several factors can impact the kite flying experience, such as:\n\n1. Wind conditions: Sufficient and steady wind is essential for successful kite flying. If the wind is too weak or inconsistent, the kite may struggle to stay aloft. Conversely, if the wind is too strong, the kite might become difficult to control or even become damaged.\n\n2. Kite design and materials: The construction, materials, and design of the kite can significantly influence its flight characteristics. Lightweight materials and well-balanced designs can provide better lift and stability, while poorly constructed kites may have difficulty maintaining flight or responding to the user's inputs.\n\n3. Skill and experience of the kite flyer: A skilled and experienced kite flyer can better control the kite, adapting to changing wind conditions and maneuvering the kite more effectively. Beginners may struggle with managing the kite's position and movements, especially in challenging conditions.\n\n4. Obstructions and terrain: Flying a kite in an area with obstructions such as trees, buildings, or power lines can be dangerous and hinder the kite's movement. Ideally, kite flyers should choose open and obstruction-free areas to fly their kites, such as fields or parks.\n\n5. Weather conditions: In addition to wind, other weather conditions can impact the kite flying experience. 
Overcast skies or rain can make kite flying less enjoyable and potentially more challenging, while sunny and clear days are typically preferred for outdoor activities such as kite flying.\n\nIn conclusion, factors such as wind conditions, kite design, the skill of the kite flyer, the presence of obstructions, and weather conditions can all impact the kite flying experience, either enhancing or hindering the activity." - } - ] - }, - { - "id": "000000136979", - "image": "000000136979.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the main subject of the image?" - }, - { - "from": "gpt", - "value": "The main subject of the image is a bicycle rider who is wearing a backpack and helmet while riding on a busy city street with a public transit bus nearby." - }, - { - "from": "human", - "value": "Is the photo in color or black and white?" - }, - { - "from": "gpt", - "value": "The photo is in black and white." - }, - { - "from": "human", - "value": "What are the other elements present in the image besides the bicycle rider and the bus?" - }, - { - "from": "gpt", - "value": "In addition to the bicycle rider and the bus, the image also features a beautiful woman waiting to cross the street and people nearby, indicating a lively urban environment." - }, - { - "from": "human", - "value": "What safety measures is the bicycle rider taking?" - }, - { - "from": "gpt", - "value": "The bicycle rider is taking safety measures by wearing a helmet while riding, which can protect their head in case of an accident or fall." - }, - { - "from": "human", - "value": "How does the presence of the public transit bus relate to the overall theme of the image?" - }, - { - "from": "gpt", - "value": "The presence of the public transit bus contributes to the overall theme of the image by showcasing a bustling city street with various modes of transportation. The bus, along with the bicycle rider and pedestrians, helps create a picture of a diverse urban landscape where people are engaged in different activities, such as commuting, leisure, or work. This scene highlights the dynamics of a modern city, where public transit, bicycles, and pedestrians coexist, showcasing the importance of urban planning and infrastructure that accommodates multiple forms of mobility for a sustainable and vibrant city life." - } - ] - }, - { - "id": "000000226354", - "image": "000000226354.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the person doing in the image?\n" - }, - { - "from": "gpt", - "value": "The person in the image is riding a skateboard, performing a trick on a yellow ramp or street sidewalk." - } - ] - }, - { - "id": "000000027424", - "image": "000000027424.jpg", - "conversations": [ - { - "from": "human", - "value": "How many people are in the image?\n" - }, - { - "from": "gpt", - "value": "There are three people in the image - a man and two women." - }, - { - "from": "human", - "value": "What is the activity that the people are engaged in?" - }, - { - "from": "gpt", - "value": "The people in the image are engaged in playing a video game, with one person holding a Wii remote, possibly participating in a motion-controlled game." - }, - { - "from": "human", - "value": "What is the setting of the image?" - }, - { - "from": "gpt", - "value": "The setting of the image is a living room." - }, - { - "from": "human", - "value": "What is the position of the women relative to the man holding the remote?" 
- }, - { - "from": "gpt", - "value": "The two women are standing near the man who is holding the remote. One of the women is raising her hand." - }, - { - "from": "human", - "value": "What kind of game can be played with a Wii remote? Can you give some examples?" - }, - { - "from": "gpt", - "value": "A Wii remote, also known as a WiiMote, is a unique controller used with the Nintendo Wii gaming console. It enables users to play motion-controlled games by incorporating an accelerometer, infrared detection, and other sensors to detect the player's movements and translate them into in-game actions. Some popular games that can be played with a Wii remote include:\n\n1. Wii Sports: This game features multiple sports simulations like tennis, bowling, baseball, golf, and boxing. Players can swing the Wii remote like a tennis racket or roll it like a bowling ball to control their character's movement within the game.\n\n2. Mario Kart Wii: In this racing game, players can use the Wii remote in combination with the Wii Wheel accessory to steer their characters and navigate through various racetracks.\n\n3. The Legend of Zelda: Skyward Sword: This action-adventure game involves controlling the main character, Link, through combat and puzzle-solving challenges. Players can swing the Wii remote like a sword and use other motion gestures for different in-game actions.\n\n4. Just Dance: A popular dance game series, where players follow the on-screen character's choreography and move the Wii remote in sync with the music and dance moves.\n\nThese examples demonstrate the versatility and interactivity that the Wii remote brings to gaming on the Nintendo Wii console. The motion controls and ability to engage players in a more immersive and active gaming experience make it popular among casual gamers and families alike." - } - ] - }, - { - "id": "000000272671", - "image": "000000272671.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the individual in the image doing?\n" - }, - { - "from": "gpt", - "value": "The individual in the image is performing a skateboarding trick or stunt, likely at a skate park." - }, - { - "from": "human", - "value": "Is the skateboarder in the air during this trick?" - }, - { - "from": "gpt", - "value": "Yes, the skateboarder is in the air while performing this trick, which may involve flipping the skateboard." - }, - { - "from": "human", - "value": "What is the gender and approximate age of the skateboarder?" - }, - { - "from": "gpt", - "value": "The skateboarder is a young boy or man, but the exact age cannot be determined from the image. Nonetheless, the individual is relatively young." - }, - { - "from": "human", - "value": "Can you provide some examples of popular skateboarding tricks?" - }, - { - "from": "gpt", - "value": "Certainly! Skateboarding tricks are numerous and come in various levels of difficulty. Some popular skateboarding tricks include:\n\n1. Ollie: This is a basic trick where the skateboarder jumps with the skateboard into the air without using their hands. The ollie is one of the first tricks a novice skateboarder will learn, as it serves as the foundation for many other tricks.\n\n2. Kickflip: In this trick, the skateboarder does an ollie and then uses their front foot to kick the board, causing it to spin in a 360-degree horizontal motion before landing back on the board.\n\n3. 
Heelflip: Similar to the kickflip, a heelflip involves the skateboarder doing an ollie and then using their back foot to kick the board, causing it to spin in a 360-degree horizontal motion, but in the opposite direction from a kickflip.\n\n4. Pop Shove-it: This trick involves the skateboarder doing an ollie, and then using their back foot to shove the board into a 180-degree or 360-degree spin, without flipping the skateboard.\n\n5. Grind: In this trick, the skateboarder slides along a surface, such as a rail or the edge of a ramp, using the trucks (the metal parts that connect the wheels to the board) instead of the wheels.\n\n6. Manual: Similar to a \"wheelie\" on a bicycle, a manual involves the skateboarder balancing on the back two wheels while rolling, usually for a certain distance.\n\n7. 50-50 Grind: This is a type of grind where both the back and front trucks of the skateboard slide across a surface.\n\nThese are just a few examples of popular skateboarding tricks, but there are many more advanced and creative maneuvers in the world of skateboarding that serve as a form of expression and artistry for skateboarders." - } - ] - }, - { - "id": "000000265432", - "image": "000000265432.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat types of items can be seen on the bathroom counter?" - }, - { - "from": "gpt", - "value": "The bathroom counter is covered with personal care and beauty products, such as hair products and various other supplies related to hygiene and grooming." - }, - { - "from": "human", - "value": "Are there any open drawers or cupboards in the bathroom?" - }, - { - "from": "gpt", - "value": "Yes, the bathroom has open drawers and cupboards, possibly revealing more personal care items or storage spaces." - }, - { - "from": "human", - "value": "How would you describe the overall state of the bathroom?" - }, - { - "from": "gpt", - "value": "The bathroom appears to be cluttered, with a large number of beauty care products on the counter, as well as open drawers and cupboards. This may indicate that the user has many personal care items, or that they have not taken the time to organize and tidy up the space." - }, - { - "from": "human", - "value": "What suggestions could you give to improve the organization of the bathroom?" - }, - { - "from": "gpt", - "value": "To improve the organization of the cluttered bathroom, one could consider the following suggestions:\n\n1. Assess and declutter: Start by going through all the personal care items, and remove any products that are expired, unused, or no longer needed. This will help eliminate excess clutter and make the bathroom more manageable.\n\n2. Utilize storage solutions: Invest in storage solutions such as drawer organizers, plastic bins, or small baskets to keep items categorized and easily accessible. Assign a specific space for each type of product, such as hair care, skincare, or dental hygiene items.\n\n3. Close drawers and cupboards: Make it a habit to close drawers and cupboards once you have retrieved or put away an item. Not only does this keep the bathroom looking tidier, but it also protects items from dust and moisture.\n\n4. Implement a regular cleaning schedule: To maintain the organization of the bathroom, establish a regular cleaning schedule where you tidy up the counter, wipe down surfaces, and reorganize any misplaced items.\n\n5. Use vertical space: If the bathroom counter remains crowded, consider installing wall shelves or cabinets to create additional storage space. 
This can help to clear some items off the counter and make the bathroom appear more organized.\n\nBy implementing these suggestions, the cluttered bathroom will likely become a more organized, efficient, and visually appealing space." - } - ] - }, - { - "id": "000000154815", - "image": "000000154815.jpg", - "conversations": [ - { - "from": "human", - "value": "\nHow many sheep are visible in the image?" - }, - { - "from": "gpt", - "value": "There are two sheep visible in the image." - }, - { - "from": "human", - "value": "What are the sheep doing in the image?" - }, - { - "from": "gpt", - "value": "The two sheep are standing in the pasture and looking through the fence." - }, - { - "from": "human", - "value": "Is the photograph taken from inside or outside the fenced area?" - }, - { - "from": "gpt", - "value": "The photograph is taken from behind the fence, looking at the sheep through the fence." - }, - { - "from": "human", - "value": "What kind of fence is present in the image?" - }, - { - "from": "gpt", - "value": "There is a barbed wire fence in the image." - }, - { - "from": "human", - "value": "Is the sheep stationary or moving in the image?" - }, - { - "from": "gpt", - "value": "The sheep are stationary, standing and looking through the fence. There is no indication in the image that the small sheep is running away from the fence." - } - ] - }, - { - "id": "000000271198", - "image": "000000271198.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the main feature of the street sign in the image?" - }, - { - "from": "gpt", - "value": "The main feature of the street sign in the image is that it has multiple colorful arrows pointing in various directions, indicating different destinations or routes one could take." - }, - { - "from": "human", - "value": "Are the destinations on the street sign real or make-believe?" - }, - { - "from": "gpt", - "value": "The destinations on the street sign are make-believe, adding a whimsical touch to the sign's appearance." - }, - { - "from": "human", - "value": "How would you describe the appearance of the street sign?" - }, - { - "from": "gpt", - "value": "The street sign has a colorful and playful appearance, featuring a wide variety of signs and arrows pointing in many different directions. Its make-believe destinations make it visually interesting and unique, drawing attention to its vibrant and imaginative design." - }, - { - "from": "human", - "value": "Where do you think a street sign like this would typically be found or used?" - }, - { - "from": "gpt", - "value": "A street sign like this, with colorful arrows and make-believe destinations, would likely be found in a creative or playful setting such as an amusement park, an art installation, or a themed event. It could also be seen in a neighborhood or community with a quirky or eccentric atmosphere, where residents appreciate imaginative and unconventional public art. The sign might serve as a decorative and conversation-provoking element in such environments, rather than providing practical navigational information. However, it is important to note that the specific location of the sign in the image cannot be determined from the details provided." - } - ] - }, - { - "id": "000000062604", - "image": "000000062604.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is the primary subject of the image?" - }, - { - "from": "gpt", - "value": "The primary subject of the image is a large church cathedral with stained glass windows and rows of wooden pews." 
- }, - { - "from": "human", - "value": "What type of windows does the cathedral have?" - }, - { - "from": "gpt", - "value": "The cathedral features stained glass windows, adding to the beauty and ambience of the interior." - }, - { - "from": "human", - "value": "Are there any notable features or details within the cathedral?" - }, - { - "from": "gpt", - "value": "Some notable features within the cathedral include rows of wooden pews, a walkway between the rows, and lighting throughout the space. These elements contribute to the grandeur and solemn atmosphere of the cathedral." - }, - { - "from": "human", - "value": "How does the interior lighting affect the cathedral's appearance?" - }, - { - "from": "gpt", - "value": "The interior lighting plays an essential role in enhancing the cathedral's ambience and highlighting its architectural details. The light illuminates the space, making the wooden pews, walkways, and other features more visible and inviting. Additionally, the light enhances the beauty of the stained glass windows by shining through the colored panes, creating vibrant patterns on the interior surfaces and adding depth to the overall space. The strategic use of lighting helps to create a serene and contemplative atmosphere, which is fitting for a place of worship." - }, - { - "from": "human", - "value": "Can you tell me more about the role of stained glass windows in cathedrals?" - }, - { - "from": "gpt", - "value": "Stained glass windows have played a significant role in the architecture and design of cathedrals for centuries. In addition to their aesthetic appeal, these windows served various practical, cultural, and religious purposes.\n\n1. Aesthetics: Stained glass windows are known for their intricate designs and vibrant colors, which add beauty and elegance to the cathedral interiors. When sunlight passes through the colored panes, it creates a captivating, otherworldly display of light and shadow inside the building.\n\n2. Storytelling: In the Middle Ages, when the majority of the population was illiterate, stained glass windows were used as a visual medium to convey religious stories and teachings. The windows often depicted Biblical narrative, scenes from the lives of saints, or moral lessons, allowing worshipers to learn about their faith through images rather than text.\n\n3. Symbolism: The themes, images, and colors chosen for stained glass windows were rich in religious symbolism. For example, specific colors could represent virtues, vices, or divine attributes, while various scenes and figures illustrated theological concepts or revered individuals.\n\n4. Light: Stained glass windows contributed to the spiritual experience of the worshipers by creating a unique atmosphere within the cathedral. The windows allowed natural light to enter the space, yet the colors and designs subtly altered the illumination, invoking a sense of transcendent beauty and divine presence.\n\nIn conclusion, stained glass windows were not only a form of artistic expression and decoration in cathedrals but also served essential functions in guiding and inspiring the faith of those who worshiped within their walls. Through captivating aesthetics, storytelling, symbolism, and manipulation of light, these windows became an integral part of the religious and cultural heritage of many historic cathedrals." 
- } - ] - }, - { - "id": "000000475932", - "image": "000000475932.jpg", - "conversations": [ - { - "from": "human", - "value": "How many people are sitting on the park bench in the image?\n" - }, - { - "from": "gpt", - "value": "There are two people sitting on the park bench in the image." - }, - { - "from": "human", - "value": "What are the two people doing while sitting on the bench?" - }, - { - "from": "gpt", - "value": "The two people, both men, are sitting on the bench, talking, and laughing with each other." - }, - { - "from": "human", - "value": "What is the condition of the wooden bench they are sitting on?" - }, - { - "from": "gpt", - "value": "The wooden bench they are sitting on is old and worn." - }, - { - "from": "human", - "value": "What is unique about the appearance of one of the men sitting on the bench?" - }, - { - "from": "gpt", - "value": "One of the men sitting on the bench has red hair, which makes him stand out visually in the image." - }, - { - "from": "human", - "value": "Does the image include any other people, such as a woman or additional men?" - }, - { - "from": "gpt", - "value": "No, the image only includes two people, both of whom are men. There are no women or additional men in the image." - } - ] - }, - { - "id": "000000279071", - "image": "000000279071.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat type of fruit is shown in the image?" - }, - { - "from": "gpt", - "value": "The image shows a green apple hanging on a tree." - }, - { - "from": "human", - "value": "What is the color of the apple in the image?" - }, - { - "from": "gpt", - "value": "The color of the apple in the image is green." - }, - { - "from": "human", - "value": "How many apples are visible in the image?" - }, - { - "from": "gpt", - "value": "There is only one green apple visible in the image." - }, - { - "from": "human", - "value": "Can you see any leaves in the image? If so, what is their relation to the apple?" - }, - { - "from": "gpt", - "value": "Yes, there are leaves visible in the image. The leaves are sprouting around the green apple, growing from the same leafy branch as the fruit." - }, - { - "from": "human", - "value": "What stage of growth is the apple at?" - }, - { - "from": "gpt", - "value": "Though I can't pinpoint the exact stage of growth, the green apple in the image appears to be relatively large and well-developed, indicating that it is likely nearing maturity but not yet fully ripe. As apples mature, they typically change color from green to red, yellow, or a combination of these colors, depending on the variety. The fact that the apple is still green suggests that it is not yet fully ripe and may need more time on the tree before being ready for harvest." - } - ] - }, - { - "id": "000000503059", - "image": "000000503059.jpg", - "conversations": [ - { - "from": "human", - "value": "What type of vehicle is depicted in the image?\n" - }, - { - "from": "gpt", - "value": "The image depicts a truck, specifically an oil tanker truck, driving down a road." - }, - { - "from": "human", - "value": "What is the truck towing?" - }, - { - "from": "gpt", - "value": "The truck is towing a tanker, which is likely used for transporting oil or other liquid cargo." - }, - { - "from": "human", - "value": "What type of landscape surrounds the truck and the road?" - }, - { - "from": "gpt", - "value": "The road and the truck are surrounded by grass, and there is a hilly field in the background, suggesting a country or rural setting." 
- }, - { - "from": "human", - "value": "What color is the truck?" - }, - { - "from": "gpt", - "value": "The truck in the image is white." - }, - { - "from": "human", - "value": "Based on the surroundings, where might this truck be heading?" - }, - { - "from": "gpt", - "value": "While it's not possible to determine the exact destination of the truck, the rural setting and grassy environment suggest that it may be heading to or from an oil refinery, a storage facility, or a distribution point within a less urbanized area. Oil tanker trucks are typically used to transport oil between various points of the supply chain, and the truck's presence in a country or rural area implies that it might be servicing locations that are not directly connected to pipelines or railways for oil transportation. Alternatively, the truck could be passing through the countryside en route to an urban area to deliver its cargo." - } - ] - }, - { - "id": "000000531657", - "image": "000000531657.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat are the colors of the fire hydrant in the image?" - }, - { - "from": "gpt", - "value": "The fire hydrant in the image is yellow and black." - }, - { - "from": "human", - "value": "Is there any surrounding snow or ice around the fire hydrant?" - }, - { - "from": "gpt", - "value": "Yes, there is snow surrounding the fire hydrant, and it also has ice around it, indicating cold weather conditions." - }, - { - "from": "human", - "value": "What is the location of the fire hydrant, and is there anything parked nearby?" - }, - { - "from": "gpt", - "value": "The fire hydrant is located by the curb, and there is a car parked close to it." - }, - { - "from": "human", - "value": "What might be some concerns related to the fire hydrant being surrounded by snow and ice?" - }, - { - "from": "gpt", - "value": "Several concerns are related to the fire hydrant being surrounded by snow and ice. These concerns primarily revolve around accessibility, functionality, and safety.\n\n1. Accessibility: A fire hydrant surrounded by snow and ice can be challenging to locate and access during an emergency. Firefighters might lose precious time digging out the hydrant, which could negatively impact their ability to control or extinguish a fire.\n\n2. Functionality: Accumulation of ice on the hydrant might cause it to freeze, hindering its normal operation. Frozen parts can prevent the hydrant from functioning correctly at a critical time, delaying water access in case of a fire.\n\n3. Safety: A fire hydrant covered in snow and ice could become a safety hazard for pedestrians, as they might not notice the hydrant while walking and accidentally slip or fall. Additionally, snow and ice buildup around the hydrant makes it more difficult for drivers to identify parking restrictions, potentially causing problems with legal enforcement and leaving the car parked too close to the hydrant during emergencies.\n\nIn conclusion, the picture of the yellow and black fire hydrant surrounded by snow and ice highlights potential issues concerning accessibility, functionality, and safety. It is essential for local authorities and residents to be vigilant in maintaining and clearing snow and ice around fire hydrants to mitigate these concerns." 
- } - ] - }, - { - "id": "000000563299", - "image": "000000563299.jpg", - "conversations": [ - { - "from": "human", - "value": "What is the person doing in the image?\n" - }, - { - "from": "gpt", - "value": "The person in the image is skateboarding, riding on a skateboard at a skateboard park." - }, - { - "from": "human", - "value": "What kind of area is the person skateboarding in?" - }, - { - "from": "gpt", - "value": "The person is skateboarding in a skateboard park area made of cement, featuring ramps and walls covered in colorful graffiti." - }, - { - "from": "human", - "value": "Can you describe the graffiti in the background?" - }, - { - "from": "gpt", - "value": "The graffiti in the background is colorful and artistic, creating a vibrant and dynamic environment for the skateboard park area. The graffiti might consist of various patterns, abstract shapes, and text which are typical styles seen in urban art." - }, - { - "from": "human", - "value": "What kinds of surfaces and features are usually found in a skateboard park?" - }, - { - "from": "gpt", - "value": "A skateboard park typically includes a variety of surfaces and features designed to challenge and engage skaters of different skill levels. Some common elements found in a skateboard park are:\n\n1. Ramps: These can be quarter pipes, half pipes, or full pipes, which allow skaters to perform aerial tricks and transitions between different sections of the park.\n\n2. Rails and Ledges: These features provide skaters with the opportunity to perform grinds and slides, showcasing their board control and balance.\n\n3. Stairs and Gaps: Stair sets and gaps between platforms challenge skaters to perform ollies or other tricks that involve jumping and clearing a certain distance.\n\n4. Bowls and Pools: These are large, curved concrete basins that allow skaters to carve and perform flowing, surf-style moves.\n\n5. Flatground: An open, flat area where skaters can practice basic tricks such as ollies, kickflips, and manuals.\n\n6. Street-inspired features: These can include benches, curbs, and planter boxes that mimic obstacles found in urban environments, allowing skaters to hone their skills for street skating.\n\nThe combination of these various surfaces and features enables skaters to practice and progress in their skills while enjoying a diverse and engaging environment specifically designed for skateboarding." - } - ] - }, - { - "id": "000000058046", - "image": "000000058046.jpg", - "conversations": [ - { - "from": "human", - "value": "How many sheep are there in the image?\n" - }, - { - "from": "gpt", - "value": "There are five sheep in the image." - }, - { - "from": "human", - "value": "What is the appearance of the sheep's faces?" - }, - { - "from": "gpt", - "value": "The sheep have black faces." - }, - { - "from": "human", - "value": "What type of landscape is surrounding the group of sheep?" - }, - { - "from": "gpt", - "value": "The group of sheep is surrounded by a hilly landscape with mountains in the background." - }, - { - "from": "human", - "value": "Describe the formation of the sheep in the image." - }, - { - "from": "gpt", - "value": "The five sheep are standing in a row next to each other on a hillside." - }, - { - "from": "human", - "value": "How does the presence of the sheep and the hilly landscape affect the environment?" - }, - { - "from": "gpt", - "value": "The presence of the sheep and the hilly landscape create a serene and pastoral scene that signifies the coexistence of domestic animals and nature. 
The image showcases the adaptability of these sheep to their natural surroundings, having found a suitable environment that offers both food and shelter.\n\nSheep are commonly found in such landscapes to graze on the available vegetation. Their grazing habits can help to maintain and even enhance the landscape's biodiversity by controlling the growth of plants and promoting the more delicate and diverse flora. Moreover, these grazing activities may support the overall health of the ecosystem by preventing soil erosion, as the sheep's movements help to break up the soil and allow water to penetrate, reducing surface runoff.\n\nIn hilly and mountainous areas, agriculture might be challenging due to the steep slopes and harsh weather conditions. Therefore, pastoral farming, like raising sheep, becomes a viable option for farmers, as it enables food production while preserving the delicate natural balance of the environment. Sheep also provide multiple products, such as wool, meat, and milk, which contribute to the livelihood of the local communities.\n\nIn conclusion, the presence of the group of five sheep on the hilly area not only adds to the picturesque quality of the scene but also exemplifies the symbiotic relationship between domestic animals and their surrounding natural landscape, contributing to the sustainability and wellbeing of both the environment and the local population." - } - ] - }, - { - "id": "000000174930", - "image": "000000174930.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhat is happening in the image?" - }, - { - "from": "gpt", - "value": "In the image, several people are standing on a train platform at a station while a train pulls up. They are waiting to board the commuting train that has just arrived." - }, - { - "from": "human", - "value": "What is the surface of the platform near the train made of?" - }, - { - "from": "gpt", - "value": "The platform near the train is made of bricks, as the people are standing on a brick sidewalk." - }, - { - "from": "human", - "value": "What are the people doing as the train arrives?" - }, - { - "from": "gpt", - "value": "As the train arrives, the people are waiting on the platform, preparing to board it for their commute." - }, - { - "from": "human", - "value": "Does the train appear to be in motion, or is it stopped for passengers to board?" - }, - { - "from": "gpt", - "value": "The train appears to have stopped or is in the process of stopping to allow passengers to board, as the people are standing close to it and are ready to get on." - } - ] - }, - { - "id": "000000489497", - "image": "000000489497.jpg", - "conversations": [ - { - "from": "human", - "value": "What color is the refrigerator in the image?\n" - }, - { - "from": "gpt", - "value": "The color of the refrigerator in the image is white." - }, - { - "from": "human", - "value": "What is the state of the refrigerator's contents?" - }, - { - "from": "gpt", - "value": "The refrigerator is almost empty, with a small amount of food inside, indicating a lack of items stocked." - }, - { - "from": "human", - "value": "Is the refrigerator's door open or closed?" - }, - { - "from": "gpt", - "value": "The refrigerator's door is open, showing the contents inside." - }, - { - "from": "human", - "value": "Are there any other kitchen appliances visible in the image?" - }, - { - "from": "gpt", - "value": "Yes, there is an open toaster oven visible in the image, in addition to the refrigerator and food shelves." 
- }, - { - "from": "human", - "value": "What can having an almost empty refrigerator imply?" - }, - { - "from": "gpt", - "value": "Having an almost empty refrigerator can imply several possibilities. Here are a few potential reasons:\n\n1. Financial constraints: The person owning the refrigerator might be facing financial difficulties that limit their ability to purchase sufficient groceries.\n\n2. Living alone or with few household members: A nearly empty refrigerator could indicate that the person lives alone or with few household members, reducing the need for stocking up on extensive quantities of food.\n\n3. Infrequent home cooking: The refrigerator's owner might rely heavily on eating out, ordering takeout, or opting for pre-packaged meals, resulting in a decreased need to store fresh ingredients and food items.\n\n4. Recently moved in: The person may have recently moved to a new location and has not yet had the chance to fully stock the refrigerator with food.\n\n5. Imminent travel plans: The refrigerator’s owner could be preparing to travel for an extended period, leading to a conscious decision not to purchase perishable items that may spoil in their absence.\n\nAs we only have an image of the opened refrigerator, it is not possible to definitively determine the reason behind the scarcity of food. However, these are some plausible explanations for the nearly empty state of the refrigerator." - } - ] - }, - { - "id": "000000272171", - "image": "000000272171.jpg", - "conversations": [ - { - "from": "human", - "value": "\nWhere are the people in the image?" - }, - { - "from": "gpt", - "value": "The people in the image are located on a beach, sitting in the sand close to the shore. They are near a small urban setting, possibly on the outskirts of downtown." - }, - { - "from": "human", - "value": "What are the people doing in the image?" - }, - { - "from": "gpt", - "value": "The people in the image are enjoying their time at the beach, sitting around on the sand, and hanging out with each other. They are not engaging in any specific activities and appear to be just relaxing and taking in the beach atmosphere." - }, - { - "from": "human", - "value": "Can you describe the surrounding area of the beach in the image?" - }, - { - "from": "gpt", - "value": "The beach in the image is situated near a small urban setting, likely on the outskirts of a downtown area. This suggests that the beach is located within a city or close to city life, providing a convenient spot for people to relax and unwind without having to travel far from their homes or workplaces. The presence of buildings nearby may be a sign of commercial establishments such as restaurants, cafes, or shops that could cater to the needs of beachgoers and tourists." - }, - { - "from": "human", - "value": "What kind of beach activities might people engage in, besides just sitting or hanging out?" - }, - { - "from": "gpt", - "value": "There are many activities that people could potentially engage in at a beach. Some of these activities include:\n\n1. Swimming: Beaches are popular spots for swimming, either for leisure or fitness. People often enjoy cooling off in the water, especially on hot summer days.\n\n2. Sunbathing: Many beachgoers enjoy sunbathing on the sand, either to get a tan, relax, or simply enjoy the warmth of the sun.\n\n3. Beach sports: People may participate in beach sports such as beach volleyball, soccer, frisbee, paddleball, or even beach yoga. 
These activities are enjoyable ways to stay active and socialize with others.\n\n4. Beachcombing and exploring: Walking along the shoreline and collecting seashells, rocks, or other interesting items can be a fun beach activity, as well as strolling by rock formations or tide pools to observe the marine life.\n\n5. Building sandcastles: This is a classic beach activity, ideal for families with children or those who enjoy unleashing their creativity by crafting elaborate sand structures.\n\n6. Surfing or bodyboarding: For those who enjoy water sports, beaches with suitable waves may provide opportunities for surfing or bodyboarding.\n\n7. Picnics: Beachgoers can bring a picnic basket and enjoy a meal with their friends and family right by the ocean.\n\n8. Photography: Beaches often provide stunning backdrops for photography, with their natural beauty, sunsets, sunrises, and other scenic moments.\n\nIn the image at hand, the people are not actively participating in any of these activities. They are simply sitting around, hanging out, and enjoying the beach atmosphere." - } - ] - } -] diff --git a/data/llava_instruct_example.json b/data/llava_instruct_example.json new file mode 100644 index 00000000..908b0695 --- /dev/null +++ b/data/llava_instruct_example.json @@ -0,0 +1,65 @@ +[ + { + "messages": [ + { + "content": "Who are they?", + "role": "user" + }, + { + "content": "They're Kane and Gretzka from Bayern Munich.", + "role": "assistant" + }, + { + "content": "What are they doing?", + "role": "user" + }, + { + "content": "They are celebrating on the soccer field", + "role": "assistant" + } + ], + "image": "1.jpg" + }, + { + "messages": [ + { + "content": "Who is he?", + "role": "user" + }, + { + "content": "He's Thomas Muller from Bayern Munich.", + "role": "assistant" + }, + { + "content": "Why is he on the ground?", + "role": "user" + }, + { + "content": "Because he's sliding on his knees to celebrate.", + "role": "assistant" + } + ], + "image": "2.jpg" + }, + { + "messages": [ + { + "content": "Please describe this image", + "role": "user" + }, + { + "content": "Chinese astronaut Gui Haichao is giving a speech.", + "role": "assistant" + }, + { + "content": "What has he accomplished?", + "role": "user" + }, + { + "content": "He was appointed to be a payload specialist on Shenzhou 16 mission in June 2022, thus becoming the first Chinese civilian of Group 3 in space on 30 May 2023. 
He is responsible for the on-orbit operation of space science experimental payloads.", + "role": "assistant" + } + ], + "image": "3.jpg" + } +] \ No newline at end of file diff --git a/data/mllm_example_dataset/README.md b/data/mllm_example_dataset/README.md new file mode 100644 index 00000000..d5c8c0e6 --- /dev/null +++ b/data/mllm_example_dataset/README.md @@ -0,0 +1,25 @@ +--- +dataset_info: + features: + - name: messages + list: + - name: content + list: + - name: index + dtype: int64 + - name: text + dtype: string + - name: type + dtype: string + - name: role + dtype: string + - name: images + sequence: image +configs: +- config_name: default + data_files: + - split: train + path: data/train-* + - split: test + path: data/test-* +--- \ No newline at end of file diff --git a/data/mllm_example_dataset/data/test-0.parquet b/data/mllm_example_dataset/data/test-0.parquet new file mode 100644 index 0000000000000000000000000000000000000000..42c20b192497168523c3d39447cdae4495085b84 GIT binary patch literal 4580 zcmdTIO>84qc_wRnlaN&LHsc*{qws2Fr_nZRJ5HP?tA%tXA^uq>&A)#Tg6ElM&t%6l zW@a46Sr)C3s!AL=RTT%s38AX0RKx*L#Q`A>sHgT&sS*OM5OAnaFCYOb-+N;_KV-Ko zDiw^9H}8G_|8J5_d3#m}2xG!{K^PFYD;zgC!F3;j6XHT@MwkS2NC-`cgFVd2E+S4} z00p|Mr68=q5`I~@OgC>ge)D7d89u~G5BUip5KVKTvFOoYgSalP6E|QQu6LQ3q(Ln2 zvT9o%yw3oGhNY1sVIVRYd6$oTz)OqL;FFjXodG{NVSs~W3|<@u=Z8bWuLlc)$UOfj z@aNnpz>B(#utSiilg{!C0Cr*X902ZMiy&-iDB}?C_%22@$8I16cZg%5^_FF*dVHJ- zz#a-K6G*cStG+xvnhjRwRdBBAU=JY3ws>P6xwfhj2h|K>YQv*1Yle$-vMhmsEP2jF zySm-LF32@a{>IujI8F>Ma-E^YaJf)-?3Sf1CxBETDsH(1>HNlsYZ}_kMpPvX5(fPocD;ve`lr& zf>9H9OdD%_PE-v{v(5kZ4?8-sj&-|*W*1Pya9zW;kYS;G3*wwgrcyJIgO-b`1T@gN zK}H~4j_aY7(Vyz7acGzZwvb(ejM%Lvnxouq6RX!(#^;h~9e4QkvAHu4)iVC5Rj~iDS@$#AeTYHxA`usg7>WF8Sb8`j{q@qsr)&I$Efy1`0_{8)E#Vgf z;2<@Gy2y$TqA3YCPDPWOW|oRUh6b{yv;BC`rk%XZ2bDqT!z=$`6go}9qVNMg@+L1m zxFWqBo>(}i^g=&w2=SgD!Y1_ty3QGbH-@@(J#2a43phIi)5M>bR4gSDhx#Ny9)9r> zticbTqTYxBKKR37>D!UMH`$9vV!*LYYPcBr5g+*(FTE;A?~F6U&uh5Qc$j+yFpnmI z&cAEI zCI&>0-9F_=VBpdeC;jr7$NLg!VoztTyg3m0)`0Z-HR%^o8rw4p9;x#pm!p3lOLJn# zRdv$9LV!yTi2c06JsT?A22;}kk=<}`83Dd``T5knEAW$uLvV{-AAoM5mm?>Pz@GKb zb*@0~@h$+0{tSQ?Qx^c5yZQYjRRLd`pZTt28o92ZNGLiHHUD>R_y4a!`B@LN|MJNB z6z9ih-@cKbKOG54gOS&+z{gy4M*N)Ks@LJ(u3?pkZw=gw8CK1X-9q)uVQOO&BDdBRcWjpRJP<}%1b)=X0=f{%pKTu*f%Q0wWL17zNC-8)&Nj5N{w3etSwT_2 z-=?Jqabcv4{3O!y7dR0$u><4OyQwytH?iZ`ZFEQ+_UH0!I-ZQDq9%Opo%`Wl8HT_5 I;r~1T1e%nlz5oCK literal 0 HcmV?d00001 diff --git a/data/mllm_example_dataset/data/train-0.parquet b/data/mllm_example_dataset/data/train-0.parquet new file mode 100644 index 0000000000000000000000000000000000000000..42c20b192497168523c3d39447cdae4495085b84 GIT binary patch literal 4580 zcmdTIO>84qc_wRnlaN&LHsc*{qws2Fr_nZRJ5HP?tA%tXA^uq>&A)#Tg6ElM&t%6l zW@a46Sr)C3s!AL=RTT%s38AX0RKx*L#Q`A>sHgT&sS*OM5OAnaFCYOb-+N;_KV-Ko zDiw^9H}8G_|8J5_d3#m}2xG!{K^PFYD;zgC!F3;j6XHT@MwkS2NC-`cgFVd2E+S4} z00p|Mr68=q5`I~@OgC>ge)D7d89u~G5BUip5KVKTvFOoYgSalP6E|QQu6LQ3q(Ln2 zvT9o%yw3oGhNY1sVIVRYd6$oTz)OqL;FFjXodG{NVSs~W3|<@u=Z8bWuLlc)$UOfj z@aNnpz>B(#utSiilg{!C0Cr*X902ZMiy&-iDB}?C_%22@$8I16cZg%5^_FF*dVHJ- zz#a-K6G*cStG+xvnhjRwRdBBAU=JY3ws>P6xwfhj2h|K>YQv*1Yle$-vMhmsEP2jF zySm-LF32@a{>IujI8F>Ma-E^YaJf)-?3Sf1CxBETDsH(1>HNlsYZ}_kMpPvX5(fPocD;ve`lr& zf>9H9OdD%_PE-v{v(5kZ4?8-sj&-|*W*1Pya9zW;kYS;G3*wwgrcyJIgO-b`1T@gN zK}H~4j_aY7(Vyz7acGzZwvb(ejM%Lvnxouq6RX!(#^;h~9e4QkvAHu4)iVC5Rj~iDS@$#AeTYHxA`usg7>WF8Sb8`j{q@qsr)&I$Efy1`0_{8)E#Vgf z;2<@Gy2y$TqA3YCPDPWOW|oRUh6b{yv;BC`rk%XZ2bDqT!z=$`6go}9qVNMg@+L1m zxFWqBo>(}i^g=&w2=SgD!Y1_ty3QGbH-@@(J#2a43phIi)5M>bR4gSDhx#Ny9)9r> 
zticbTqTYxBKKR37>D!UMH`$9vV!*LYYPcBr5g+*(FTE;A?~F6U&uh5Qc$j+yFpnmI z&cAEI zCI&>0-9F_=VBpdeC;jr7$NLg!VoztTyg3m0)`0Z-HR%^o8rw4p9;x#pm!p3lOLJn# zRdv$9LV!yTi2c06JsT?A22;}kk=<}`83Dd``T5knEAW$uLvV{-AAoM5mm?>Pz@GKb zb*@0~@h$+0{tSQ?Qx^c5yZQYjRRLd`pZTt28o92ZNGLiHHUD>R_y4a!`B@LN|MJNB z6z9ih-@cKbKOG54gOS&+z{gy4M*N)Ks@LJ(u3?pkZw=gw8CK1X-9q)uVQOO&BDdBRcWjpRJP<}%1b)=X0=f{%pKTu*f%Q0wWL17zNC-8)&Nj5N{w3etSwT_2 z-=?Jqabcv4{3O!y7dR0$u><4OyQwytH?iZ`ZFEQ+_UH0!I-ZQDq9%Opo%`Wl8HT_5 I;r~1T1e%nlz5oCK literal 0 HcmV?d00001 diff --git a/examples/mllm/sft_instructblip.sh b/examples/mllm/sft_instructblip.sh index 92478500..b3923655 100644 --- a/examples/mllm/sft_instructblip.sh +++ b/examples/mllm/sft_instructblip.sh @@ -3,20 +3,20 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --stage sft_mm \ --do_train \ - --model_name_or_path /home/LAB/fengzc/LLM/checkpoints/Salesforce/instructblip-vicuna-7b \ - --dataset llava_instruct_100 \ + --model_name_or_path Salesforce/instructblip-vicuna-7b \ + --dataset mllm_instruct_example \ --dataset_dir data \ --template default \ --finetuning_type lora \ - --lora_target q_proj,k_proj \ + --lora_target all \ --output_dir saves/instructblip-vicuna-7b/lora/sft \ --overwrite_cache \ --overwrite_output_dir \ --cutoff_len 1024 \ --preprocessing_num_workers 16 \ - --per_device_train_batch_size 4 \ + --per_device_train_batch_size 3 \ --per_device_eval_batch_size 1 \ - --gradient_accumulation_steps 8 \ + --gradient_accumulation_steps 1 \ --lr_scheduler_type cosine \ --logging_steps 1 \ --warmup_steps 20 \ @@ -25,10 +25,8 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --evaluation_strategy steps \ --load_best_model_at_end \ --learning_rate 1e-5 \ - --num_train_epochs 3.0 \ + --num_train_epochs 50 \ --max_samples 3000 \ --val_size 0.1 \ --plot_loss \ - --quantization_bit 8 \ - --image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017 \ - --use_qformer \ No newline at end of file + --bf16 \ No newline at end of file diff --git a/examples/mllm/sft_blip2.sh b/examples/mllm/sft_llava.sh similarity index 58% rename from examples/mllm/sft_blip2.sh rename to examples/mllm/sft_llava.sh index ac0a3f11..c1fce693 100644 --- a/examples/mllm/sft_blip2.sh +++ b/examples/mllm/sft_llava.sh @@ -3,20 +3,20 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --stage sft_mm \ --do_train \ - --model_name_or_path /home/LAB/fengzc/LLM/checkpoints/Salesforce/blip2-opt-2.7b \ - --dataset llava_instruct_100 \ + --model_name_or_path llava-hf/llava-1.5-7b-hf \ + --dataset mllm_instruct_example \ --dataset_dir data \ --template default \ --finetuning_type lora \ - --lora_target q_proj,k_proj \ - --output_dir saves/blip2-opt-2.7b/lora/sft \ + --lora_target all \ + --output_dir saves/llava-1.5-7b/lora/sft \ --overwrite_cache \ --overwrite_output_dir \ --cutoff_len 1024 \ --preprocessing_num_workers 16 \ - --per_device_train_batch_size 4 \ + --per_device_train_batch_size 3 \ --per_device_eval_batch_size 1 \ - --gradient_accumulation_steps 8 \ + --gradient_accumulation_steps 1 \ --lr_scheduler_type cosine \ --logging_steps 1 \ --warmup_steps 20 \ @@ -25,9 +25,8 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ --evaluation_strategy steps \ --load_best_model_at_end \ --learning_rate 5e-5 \ - --num_train_epochs 3.0 \ + --num_train_epochs 100 \ --max_samples 3000 \ --val_size 0.1 \ --plot_loss \ - --quantization_bit 8 \ - --image_path /home/LAB/fengzc/LLM/checkpoints/liuhaotian/LLaVA-Instruct-150K/images/coco/train2017 \ No newline at end of file + --bf16 \ No newline at 
end of file diff --git a/scripts/make_mllm_instruct.py b/scripts/make_mllm_instruct.py new file mode 100644 index 00000000..41e13b8e --- /dev/null +++ b/scripts/make_mllm_instruct.py @@ -0,0 +1,95 @@ +import json +import os.path + +import fire +from datasets import Dataset, concatenate_datasets, load_dataset, Value, Image, Features, Sequence + +"""usage +python3 scripts/make_mllm_instruct.py \ +--json_path data/llava_instruct_example.json \ +--image_path data/images \ +--output_path data/mllm_example_dataset +""" + + +def make_one_json(json_path, image_path) -> Dataset: + with open(json_path) as f: + raw_data_ls = json.loads(f.read()) + data_ls = [] + for i, data in enumerate(raw_data_ls): + for j, message in enumerate(data['messages']): + text = message['content'] + message['content'] = [{'index': None, 'text': text, 'type': 'text'}] + if j == 0: + message['content'].append({'index': 0, 'text': None, 'type': 'image'}) + image = data['image'] + if image_path: + image = os.path.join(image_path, data['image']) + data['images'] = [image] + del data['image'] + data_ls.append(data) + + def gen(): + for data in data_ls: + yield data + + features = Features({'messages': [{'content': [ + {'index': Value(dtype='int64', id=None), 'text': Value(dtype='string', id=None), + 'type': Value(dtype='string', id=None)}], 'role': Value(dtype='string', id=None)}], + 'images': Sequence(feature=Image(decode=True, id=None), length=-1, id=None)}) + dataset = Dataset.from_generator(gen, features=features) + return dataset + + +yaml_content = """--- +dataset_info: + features: + - name: messages + list: + - name: content + list: + - name: index + dtype: int64 + - name: text + dtype: string + - name: type + dtype: string + - name: role + dtype: string + - name: images + sequence: image +configs: +- config_name: default + data_files: + - split: train + path: data/train-* + - split: test + path: data/test-* +---""" + + +def main( + json_path: str, + image_path: str, + output_path: str, +): + json_path_list = json_path.split() + dataset_list = [] + for json_path in json_path_list: + dataset = make_one_json(json_path, image_path) + dataset_list.append(dataset) + dataset = concatenate_datasets(dataset_list) + print(dataset[0]) + data_path = os.path.join(output_path, "data") + os.makedirs(os.path.join(data_path), exist_ok=True) + parquet_path = os.path.join(data_path, "train-0.parquet") + dataset.to_parquet(parquet_path) + parquet_path = os.path.join(data_path, "test-0.parquet") + dataset.to_parquet(parquet_path) + readme_path = os.path.join(output_path, "README.md") + with open(readme_path, 'w') as f: + f.write(yaml_content) + + +if __name__ == '__main__': + fire.Fire(main) diff --git a/scripts/test_mllm.py b/scripts/test_mllm.py new file mode 100644 index 00000000..c03525b8 --- /dev/null +++ b/scripts/test_mllm.py @@ -0,0 +1,84 @@ +import os.path + +import fire +import torch +from datasets import load_dataset +from peft import PeftModel +from transformers import AutoTokenizer, AutoModelForVision2Seq, AutoProcessor + +"""usage +python3 scripts/test_mllm.py \ +--base_model_path llava-hf/llava-1.5-7b-hf \ +--lora_model_path saves/llava-1.5-7b/lora/sft \ +--model_path saves/llava-1.5-7b/lora/merged \ +--dataset_name data/mllm_example_dataset \ +--do_merge 1 +""" + + +def get_processor(model_path): + CHAT_TEMPLATE = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the user's questions. {% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image>{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" + tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True) + tokenizer.chat_template = CHAT_TEMPLATE + processor = AutoProcessor.from_pretrained(model_path) + processor.tokenizer = tokenizer + return processor + + +def apply_lora(base_model_path, model_path, lora_path): + print(f"Loading the base model from {base_model_path}") + base_model = AutoModelForVision2Seq.from_pretrained( + base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="cuda", + ) + processor = get_processor(base_model_path) + tokenizer = processor.tokenizer + print(f"Loading the LoRA adapter from {lora_path}") + + lora_model = PeftModel.from_pretrained( + base_model, + lora_path, + torch_dtype=torch.float16, + ) + + print("Applying the LoRA") + model = lora_model.merge_and_unload() + + print(f"Saving the target model to {model_path}") + model.save_pretrained(model_path) + tokenizer.save_pretrained(model_path) + processor.image_processor.save_pretrained(model_path) + + +def main( + model_path: str, + dataset_name: str, + base_model_path: str = "", + lora_model_path: str = "", + do_merge: bool = False, +): + if not os.path.exists(model_path) or do_merge: + apply_lora(base_model_path, model_path, lora_model_path) + model = AutoModelForVision2Seq.from_pretrained( + model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, device_map="cuda" + ) + processor = get_processor(model_path) + raw_datasets = load_dataset(dataset_name) + train_dataset = raw_datasets['train'] + examples = train_dataset.select(range(3)) + texts = [] + images = [] + for example in examples: + messages = example["messages"][:1] + text = processor.tokenizer.apply_chat_template( + messages, tokenize=False, add_generation_prompt=False + ) + texts.append(text) + images.append(example["images"][0]) + batch = processor(texts, images, return_tensors="pt", padding=True).to("cuda") + output = model.generate(**batch, max_new_tokens=100) + res = processor.batch_decode(output, skip_special_tokens=True) + print(res) + + +if __name__ == '__main__': + fire.Fire(main) diff --git a/src/llmtuner/data/loader.py b/src/llmtuner/data/loader.py index b3af434b..18665731 100644 --- a/src/llmtuner/data/loader.py +++ b/src/llmtuner/data/loader.py @@ -199,8 +199,7 @@ def get_mm_dataset( with training_args.main_process_first(desc="load dataset"): all_datasets = [] for dataset_attr in get_dataset_list(data_args): - local_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name) - all_datasets.append(load_dataset("json", data_files=local_path)['train']) + all_datasets.append(load_dataset(dataset_attr.dataset_name)['train']) dataset = merge_dataset(all_datasets, data_args, training_args) return dataset diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index b8edfa10..8494ba7e 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -275,4 +275,4 @@ def get_preprocess_and_print_func( ) print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) - return preprocess_func, print_function + return
preprocess_func, print_function \ No newline at end of file diff --git a/src/llmtuner/hparams/data_args.py b/src/llmtuner/hparams/data_args.py index 3b52f1ea..f5f75c77 100644 --- a/src/llmtuner/hparams/data_args.py +++ b/src/llmtuner/hparams/data_args.py @@ -88,10 +88,6 @@ class DataArguments: default=None, metadata={"help": "Path to save or load the tokenized datasets."}, ) - image_path: Optional[str] = field( - default=None, - metadata={"help": "Path to images."}, - ) def __post_init__(self): if self.reserved_label_len >= self.cutoff_len: diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py index 32637f59..0e42033f 100644 --- a/src/llmtuner/hparams/model_args.py +++ b/src/llmtuner/hparams/model_args.py @@ -165,10 +165,6 @@ class ModelArguments: default=False, metadata={"help": "For debugging purposes, print the status of the parameters in the model."}, ) - use_qformer: bool = field( - default=False, - metadata={"help": "Whether use qformer for Multimodal LLM."}, - ) def __post_init__(self): self.compute_dtype = None diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py index 624d8a85..e66a984b 100644 --- a/src/llmtuner/model/adapter.py +++ b/src/llmtuner/model/adapter.py @@ -182,7 +182,8 @@ def init_adapter( def init_mm_adapter( model: "AutoModelForVision2Seq", model_args: "ModelArguments", finetuning_args: "FinetuningArguments", - is_trainable: bool + is_trainable: bool, + use_clm=True, ) -> "AutoModelForVision2Seq": if finetuning_args.finetuning_type == "lora": logger.info("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA")) @@ -253,12 +254,19 @@ def init_mm_adapter( } model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs) else: - lora_config = LoraConfig( - # task_type=TaskType.CAUSAL_LM, - inference_mode=False, - use_dora=finetuning_args.use_dora, - **peft_kwargs, - ) + if use_clm: + lora_config = LoraConfig( + task_type=TaskType.CAUSAL_LM, + inference_mode=False, + use_dora=finetuning_args.use_dora, + **peft_kwargs, + ) + else: + lora_config = LoraConfig( + inference_mode=False, + use_dora=finetuning_args.use_dora, + **peft_kwargs, + ) model = get_peft_model(model, lora_config) if (not finetuning_args.pure_bf16) and (not finetuning_args.use_badam): diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index eeee69a6..917f11c9 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -191,6 +191,7 @@ def load_mm_model( finetuning_args: "FinetuningArguments", is_trainable: bool = False, add_valuehead: bool = False, + use_clm=True, ) -> "AutoModelForVision2Seq": r""" Loads pretrained model. Must after load_tokenizer. 
@@ -231,7 +232,7 @@ def load_mm_model( patch_model(model, tokenizer, model_args, is_trainable) register_autoclass(config, model, tokenizer) - model = init_mm_adapter(model, model_args, finetuning_args, is_trainable) + model = init_mm_adapter(model, model_args, finetuning_args, is_trainable, use_clm) if not is_trainable: model.requires_grad_(False) diff --git a/src/llmtuner/train/sftmm/collator.py b/src/llmtuner/train/sftmm/collator.py index e91374bc..95dbd939 100644 --- a/src/llmtuner/train/sftmm/collator.py +++ b/src/llmtuner/train/sftmm/collator.py @@ -1,69 +1,29 @@ -import json -import os from dataclasses import dataclass - -import torch -from torch.utils.data import Dataset as Dataset_torch -from datasets import Dataset -from PIL import Image from transformers import AutoProcessor -class ImageCaptioningDataset(Dataset_torch): - def __init__(self, dataset: Dataset, image_path: str, processor: AutoProcessor): - self.processor = processor - self.dataset = dataset - self.image_path = image_path - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - source = self.dataset[idx] - image_id = source['image'] - image = Image.open(os.path.join(self.image_path, image_id)) - convs = source['conversations'] - prompt = convs[0]['value'] - label = convs[1]['value'] - image_inputs = self.processor(image, return_tensors="pt") - image_inputs = {k: v.squeeze() for k, v in image_inputs.items()} - inputs = { - "input_ids": prompt, - "labels": label, - } - for key in image_inputs: - inputs[key] = image_inputs[key] - return inputs - - @dataclass class DataCollatorForVis2Seq: processor: AutoProcessor - use_qformer: bool = False - def __call__(self, features, return_tensors=None): - processed_batch = {} - for key in features[0].keys(): - if key == 'pixel_values': - processed_batch[key] = torch.stack([example[key] for example in features]) - elif key == 'input_ids': - text_inputs = self.processor.tokenizer( - [example[key] for example in features], padding="max_length", return_tensors="pt", - max_length=512, - ) - processed_batch["input_ids"] = text_inputs["input_ids"] - processed_batch["attention_mask"] = text_inputs["attention_mask"] - if self.use_qformer: - qformer_text_inputs = self.processor.qformer_tokenizer( - [example[key] for example in features], padding="max_length", return_tensors="pt", - max_length=512, - ) - processed_batch["qformer_input_ids"] = qformer_text_inputs["input_ids"] - processed_batch["qformer_attention_mask"] = qformer_text_inputs["attention_mask"] - elif key == 'labels': - text_inputs = self.processor.tokenizer( - [example[key] for example in features], padding="max_length", return_tensors="pt", - max_length=512, - ) - processed_batch["labels"] = text_inputs["input_ids"] - return processed_batch + def __call__(self, examples): + texts = [] + images = [] + for example in examples: + if len(example["images"]) > 1: + raise ValueError("This collator only supports one image per example") + messages = example["messages"] + text = self.processor.tokenizer.apply_chat_template( + messages, tokenize=False, add_generation_prompt=False + ) + texts.append(text) + images.append(example["images"][0]) + + batch = self.processor(text=texts, images=images, return_tensors="pt", padding=True) + + labels = batch["input_ids"].clone() + if self.processor.tokenizer.pad_token_id is not None: + labels[labels == self.processor.tokenizer.pad_token_id] = -100 + batch["labels"] = labels + + return batch diff --git a/src/llmtuner/train/sftmm/trainer.py 
b/src/llmtuner/train/sftmm/trainer.py index 96b86b44..f094e609 100644 --- a/src/llmtuner/train/sftmm/trainer.py +++ b/src/llmtuner/train/sftmm/trainer.py @@ -5,7 +5,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import numpy as np import torch -from transformers import Seq2SeqTrainer +from transformers import Seq2SeqTrainer, Trainer from ...extras.constants import IGNORE_INDEX from ...extras.logging import get_logger @@ -32,23 +32,6 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer): self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_for_sparse_tensor, self.accelerator) - # def compute_loss(self, model, inputs, return_outputs=False): - # print(inputs.keys()) - # device = "cuda" - # input_ids = inputs.get("input_ids").to(device) - # pixel_values = inputs.get("pixel_values").to(device, torch.float16) - # attention_mask = inputs.get("attention_mask").to(device) - # labels = inputs.get("labels").to(device) - # - # outputs = model(input_ids=input_ids, - # pixel_values=pixel_values, - # labels=labels, - # # attention_mask=attention_mask, - # ) - # loss = outputs.loss - # print("Loss:", loss.item()) - # return (loss, outputs) if return_outputs else loss - def create_optimizer(self) -> "torch.optim.Optimizer": if self.optimizer is None: self.optimizer = create_custom_optimzer(self.model, self.args, self.finetuning_args) @@ -59,79 +42,3 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer): ) -> "torch.optim.lr_scheduler.LRScheduler": create_custom_scheduler(self.args, num_training_steps, optimizer) return super().create_scheduler(num_training_steps, optimizer) - - def prediction_step( - self, - model: "torch.nn.Module", - inputs: Dict[str, Union[torch.Tensor, Any]], - prediction_loss_only: bool, - ignore_keys: Optional[List[str]] = None, - ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: - r""" - Removes the prompt part in the generated tokens. - - Subclass and override to inject custom behavior. - """ - labels = inputs["labels"].detach().clone() if "labels" in inputs else None # backup labels - if self.args.predict_with_generate: - assert self.tokenizer.padding_side == "left", "This method only accepts left-padded tensor." - prompt_len, label_len = inputs["input_ids"].size(-1), inputs["labels"].size(-1) - if prompt_len > label_len: - inputs["labels"] = self._pad_tensors_to_target_len(inputs["labels"], inputs["input_ids"]) - if label_len > prompt_len: # truncate the labels instead of padding the inputs (llama2 fp16 compatibility) - inputs["labels"] = inputs["labels"][:, :prompt_len] - - loss, generated_tokens, _ = super().prediction_step( # ignore the returned labels (may be truncated) - model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys - ) - if generated_tokens is not None and self.args.predict_with_generate: - generated_tokens[:, :prompt_len] = self.tokenizer.pad_token_id - generated_tokens = generated_tokens.contiguous() - - return loss, generated_tokens, labels - - def _pad_tensors_to_target_len(self, src_tensor: torch.Tensor, tgt_tensor: torch.Tensor) -> torch.Tensor: - r""" - Pads the tensor to the same length as the target tensor. - """ - assert self.tokenizer.pad_token_id is not None, "Pad token is required." 
- padded_tensor = self.tokenizer.pad_token_id * torch.ones_like(tgt_tensor) - padded_tensor[:, -src_tensor.shape[-1]:] = src_tensor # adopt left-padding - return padded_tensor.contiguous() # in contiguous memory - - def save_predictions(self, predict_results: "PredictionOutput") -> None: - r""" - Saves model predictions to `output_dir`. - - A custom behavior that not contained in Seq2SeqTrainer. - """ - if not self.is_world_process_zero(): - return - - output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl") - logger.info(f"Saving prediction results to {output_prediction_file}") - - labels = np.where( - predict_results.label_ids != IGNORE_INDEX, predict_results.label_ids, self.tokenizer.pad_token_id - ) - preds = np.where( - predict_results.predictions != IGNORE_INDEX, predict_results.predictions, self.tokenizer.pad_token_id - ) - - for i in range(len(preds)): - pad_len = np.nonzero(preds[i] != self.tokenizer.pad_token_id)[0] - if len(pad_len): - preds[i] = np.concatenate( - (preds[i][pad_len[0]:], preds[i][: pad_len[0]]), axis=-1 - ) # move pad token to last - - decoded_labels = self.tokenizer.batch_decode( - labels, skip_special_tokens=True, clean_up_tokenization_spaces=False - ) - decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True, clean_up_tokenization_spaces=True) - - with open(output_prediction_file, "w", encoding="utf-8") as writer: - res: List[str] = [] - for label, pred in zip(decoded_labels, decoded_preds): - res.append(json.dumps({"label": label, "predict": pred}, ensure_ascii=False)) - writer.write("\n".join(res)) diff --git a/src/llmtuner/train/sftmm/workflow.py b/src/llmtuner/train/sftmm/workflow.py index 9f952772..21f4aebf 100644 --- a/src/llmtuner/train/sftmm/workflow.py +++ b/src/llmtuner/train/sftmm/workflow.py @@ -1,21 +1,14 @@ # Inspired by: https://github.com/huggingface/transformers/blob/v4.34.1/examples/pytorch/summarization/run_summarization.py import os from typing import TYPE_CHECKING, List, Optional - -import torch -from PIL import Image -from torch.utils.data import Dataset -from transformers import DataCollatorForSeq2Seq, LlavaNextForConditionalGeneration, AutoModelForVision2Seq - from ...data import split_dataset, get_mm_dataset -from ...extras.constants import IGNORE_INDEX from ...extras.misc import get_logits_processor from ...extras.ploting import plot_loss -from ...model import load_model, load_tokenizer, load_processor, load_mm_model +from ...model import load_tokenizer, load_processor, load_mm_model from ..utils import create_modelcard_and_push from .metric import ComputeMetrics from .trainer import CustomSeq2SeqTrainer -from .collator import DataCollatorForVis2Seq, ImageCaptioningDataset +from .collator import DataCollatorForVis2Seq if TYPE_CHECKING: from transformers import Seq2SeqTrainingArguments, TrainerCallback @@ -32,28 +25,27 @@ def run_sft_mm( callbacks: Optional[List["TrainerCallback"]] = None, ): processor = load_processor(model_args) - tokenizer = processor.tokenizer - model = load_mm_model(processor, model_args, finetuning_args, training_args.do_train) + tokenizer = load_tokenizer(model_args) + CHAT_TEMPLATE = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. 
{% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" + tokenizer.chat_template = CHAT_TEMPLATE + processor.tokenizer = tokenizer + use_clm = True + if "blip" in model_args.model_name_or_path: + use_clm = False + model = load_mm_model(processor, model_args, finetuning_args, training_args.do_train, use_clm=use_clm) dataset = get_mm_dataset(processor, model_args, data_args, training_args, stage="sft") - if training_args.predict_with_generate: - tokenizer.padding_side = "left" # use left-padding in generation if getattr(model, "is_quantized", False) and not training_args.do_train: setattr(model, "_hf_peft_config_loaded", True) # hack here: make model compatible with prediction - splited_dataset = split_dataset(dataset, data_args, training_args) - splited_dataset['train_dataset'].set_format(type=splited_dataset['train_dataset'].format["type"], - columns=list(splited_dataset['train_dataset'].features.keys())) - splited_dataset['eval_dataset'].set_format(type=splited_dataset['eval_dataset'].format["type"], - columns=list(splited_dataset['eval_dataset'].features.keys())) - train_dataset = ImageCaptioningDataset(splited_dataset['train_dataset'], data_args.image_path, processor) - eval_dataset = ImageCaptioningDataset(splited_dataset['eval_dataset'], data_args.image_path, processor) + train_dataset = dataset + eval_dataset = dataset data_collator = DataCollatorForVis2Seq( processor=processor, - use_qformer=model_args.use_qformer, ) # Override the decoding parameters of Seq2SeqTrainer training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams + training_args.remove_unused_columns = False # Initialize our Trainer trainer = CustomSeq2SeqTrainer( @@ -67,7 +59,6 @@ def run_sft_mm( train_dataset=train_dataset, eval_dataset=eval_dataset, ) - # Keyword arguments for `model.generate` gen_kwargs = generating_args.to_dict() gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids From f85f403d39e0af6dd57f9c94290e4fa63b344f0d Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 00:56:06 +0800 Subject: [PATCH 05/34] remove conflicts --- scripts/test_mllm.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/scripts/test_mllm.py b/scripts/test_mllm.py index 882bf032..961f02bf 100644 --- a/scripts/test_mllm.py +++ b/scripts/test_mllm.py @@ -5,6 +5,7 @@ import torch from datasets import load_dataset from peft import PeftModel from transformers import AutoTokenizer, AutoModelForVision2Seq, AutoProcessor +import shutil """usage python3 scripts/test_mllm.py \ @@ -47,15 +48,14 @@ def apply_lora(base_model_path, model_path, lora_path): model.save_pretrained(model_path) tokenizer.save_pretrained(model_path) processor.image_processor.save_pretrained(model_path) - if 'instructblip' in model_path: - processor.qformer_tokenizer.save_pretrained(model_path) + def main( - model_path: str, - dataset_name: str, - base_model_path: str = "", - lora_model_path: str = "", - do_merge: bool = False, + model_path: str, + dataset_name: str, + base_model_path: str = "", + 
lora_model_path: str = "", + do_merge: bool = False, ): if not os.path.exists(model_path) or do_merge: apply_lora(base_model_path, model_path, lora_model_path) From 8239907f578dc4b185cfb1789944db23d112fa2c Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 01:01:59 +0800 Subject: [PATCH 06/34] remove error --- src/llmtuner/model/loader.py | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index f3856da7..a6c37922 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -202,29 +202,6 @@ def load_mm_model( patch_config(config, tokenizer, model_args, init_kwargs, is_trainable) model = None - if is_trainable and model_args.use_unsloth: - from unsloth import FastLanguageModel # type: ignore - - unsloth_kwargs = { - "model_name": model_args.model_name_or_path, - "max_seq_length": model_args.model_max_length, - "dtype": model_args.compute_dtype, - "load_in_4bit": model_args.quantization_bit == 4, - "token": model_args.hf_hub_token, - "device_map": {"": get_current_device()}, - "rope_scaling": getattr(config, "rope_scaling", None), - "fix_tokenizer": False, - "trust_remote_code": True, - } - try: - model, _ = FastLanguageModel.from_pretrained(**unsloth_kwargs) - except NotImplementedError: - logger.warning("Unsloth does not support model type {}.".format(getattr(config, "model_type", None))) - model_args.use_unsloth = False - - if model_args.adapter_name_or_path: - model_args.adapter_name_or_path = None - logger.warning("Unsloth does not support loading adapters.") if model is None: init_kwargs["config"] = config init_kwargs["pretrained_model_name_or_path"] = model_args.model_name_or_path From 838eb87a961894b072bf79a60bbda63516670d6f Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 08:20:41 +0800 Subject: [PATCH 07/34] merge model part to the text stream --- src/llmtuner/hparams/model_args.py | 4 + src/llmtuner/model/__init__.py | 3 +- src/llmtuner/model/adapter.py | 107 +-------------------------- src/llmtuner/model/loader.py | 75 ++++--------------- src/llmtuner/train/sftmm/workflow.py | 7 +- 5 files changed, 24 insertions(+), 172 deletions(-) diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py index b60492a0..a6e4b710 100644 --- a/src/llmtuner/hparams/model_args.py +++ b/src/llmtuner/hparams/model_args.py @@ -169,6 +169,10 @@ class ModelArguments: default=False, metadata={"help": "For debugging purposes, print the status of the parameters in the model."}, ) + use_mllm: bool = field( + default=False, + metadata={"help": "Whether use Multimodal LLM."}, + ) def __post_init__(self): self.compute_dtype = None diff --git a/src/llmtuner/model/__init__.py b/src/llmtuner/model/__init__.py index 2bd73365..f6be60d8 100644 --- a/src/llmtuner/model/__init__.py +++ b/src/llmtuner/model/__init__.py @@ -1,10 +1,9 @@ -from .loader import load_config, load_model, load_tokenizer, load_mm_model +from .loader import load_config, load_model, load_tokenizer, load_processor from .utils.misc import find_all_linear_modules, load_valuehead_params __all__ = [ "load_config", "load_model", - "load_mm_model", "load_tokenizer", "load_processor", "load_valuehead_params", diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py index 8079c028..bcefee92 100644 --- a/src/llmtuner/model/adapter.py +++ b/src/llmtuner/model/adapter.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import 
TYPE_CHECKING, Union import torch from peft import LoraConfig, LoraModel, PeftModel, TaskType, get_peft_model @@ -21,11 +21,11 @@ logger = get_logger(__name__) def init_adapter( config: "PretrainedConfig", - model: "PreTrainedModel", + model: Union["PreTrainedModel","AutoModelForVision2Seq"], model_args: "ModelArguments", finetuning_args: "FinetuningArguments", is_trainable: bool, -) -> "PreTrainedModel": +) -> Union["PreTrainedModel","AutoModelForVision2Seq"]: r""" Initializes the adapters. @@ -195,103 +195,4 @@ def init_adapter( if model_args.adapter_name_or_path is not None: logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path))) - return model - - -def init_mm_adapter( - model: "AutoModelForVision2Seq", model_args: "ModelArguments", - finetuning_args: "FinetuningArguments", - is_trainable: bool, - use_clm=True, -) -> "AutoModelForVision2Seq": - if finetuning_args.finetuning_type == "lora": - logger.info("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA")) - adapter_to_resume = None - - if model_args.adapter_name_or_path is not None: - is_mergeable = True - if getattr(model, "quantization_method", None): # merge lora in quantized model is unstable - assert len(model_args.adapter_name_or_path) == 1, "Quantized model only accepts a single adapter." - is_mergeable = False - - if is_deepspeed_zero3_enabled(): - assert len(model_args.adapter_name_or_path) == 1, "Cannot use multiple adapters in DeepSpeed ZeRO-3." - is_mergeable = False - - if (is_trainable and not finetuning_args.create_new_adapter) or (not is_mergeable): - adapter_to_merge = model_args.adapter_name_or_path[:-1] - adapter_to_resume = model_args.adapter_name_or_path[-1] - else: - adapter_to_merge = model_args.adapter_name_or_path - - for adapter in adapter_to_merge: - model: "LoraModel" = PeftModel.from_pretrained( - model, adapter, offload_folder=model_args.offload_folder - ) - model = model.merge_and_unload() - - if len(adapter_to_merge) > 0: - logger.info("Merged {} adapter(s).".format(len(adapter_to_merge))) - - if adapter_to_resume is not None: # resume lora training - model = PeftModel.from_pretrained( - model, adapter_to_resume, is_trainable=is_trainable, offload_folder=model_args.offload_folder - ) - - if is_trainable and adapter_to_resume is None: # create new lora weights while training - if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all": - target_modules = find_all_linear_modules(model) - else: - target_modules = finetuning_args.lora_target - - if finetuning_args.use_llama_pro: - target_modules = find_expanded_modules(model, target_modules, finetuning_args.num_layer_trainable) - - if ( - finetuning_args.use_dora - and getattr(model, "quantization_method", None) is not None - and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES - ): - raise ValueError("DoRA is not compatible with PTQ-quantized models.") - - peft_kwargs = { - "r": finetuning_args.lora_rank, - "target_modules": target_modules, - "lora_alpha": finetuning_args.lora_alpha, - "lora_dropout": finetuning_args.lora_dropout, - "use_rslora": finetuning_args.use_rslora, - "modules_to_save": finetuning_args.additional_target, - } - - if model_args.use_unsloth: - from unsloth import FastLanguageModel # type: ignore - - unsloth_peft_kwargs = { - "model": model, - "max_seq_length": model_args.model_max_length, - "use_gradient_checkpointing": "unsloth", - } - model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs) - 
else: - if use_clm: - lora_config = LoraConfig( - task_type=TaskType.CAUSAL_LM, - inference_mode=False, - use_dora=finetuning_args.use_dora, - **peft_kwargs, - ) - else: - lora_config = LoraConfig( - inference_mode=False, - use_dora=finetuning_args.use_dora, - **peft_kwargs, - ) - model = get_peft_model(model, lora_config) - - if (not finetuning_args.pure_bf16) and (not finetuning_args.use_badam): - for param in filter(lambda p: p.requires_grad, model.parameters()): - param.data = param.data.to(torch.float32) - - if model_args.adapter_name_or_path is not None: - logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path))) - return model + return model \ No newline at end of file diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index a6c37922..3712a592 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -1,11 +1,11 @@ -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any, Dict, Union from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AutoProcessor, AutoModelForVision2Seq from trl import AutoModelForCausalLMWithValueHead from ..extras.logging import get_logger from ..extras.misc import count_parameters, try_download_model_from_ms -from .adapter import init_adapter, init_mm_adapter +from .adapter import init_adapter from .patcher import patch_config, patch_model, patch_tokenizer, patch_valuehead_model from .utils.misc import load_valuehead_params, register_autoclass from .utils.mod import convert_pretrained_model_to_mod, load_mod_pretrained_model @@ -106,12 +106,12 @@ def load_config(model_args: "ModelArguments") -> "PretrainedConfig": def load_model( - tokenizer: "PreTrainedTokenizer", - model_args: "ModelArguments", - finetuning_args: "FinetuningArguments", - is_trainable: bool = False, - add_valuehead: bool = False, -) -> "PreTrainedModel": + tokenizer: "PreTrainedTokenizer", + model_args: "ModelArguments", + finetuning_args: "FinetuningArguments", + is_trainable: bool = False, + add_valuehead: bool = False, +) -> Union["PreTrainedModel", "AutoModelForVision2Seq"]: r""" Loads pretrained model. """ @@ -134,7 +134,10 @@ def load_model( if model_args.mixture_of_depths == "load": model = load_mod_pretrained_model(**init_kwargs) else: - model = AutoModelForCausalLM.from_pretrained(**init_kwargs) + if model_args.use_mllm: + model = AutoModelForVision2Seq.from_pretrained(**init_kwargs) + else: + model = AutoModelForCausalLM.from_pretrained(**init_kwargs) if model_args.mixture_of_depths == "convert": model = convert_pretrained_model_to_mod(model, config, model_args) @@ -182,56 +185,4 @@ def load_model( ) ) - return model - - -def load_mm_model( - processor: "AutoProcessor", - model_args: "ModelArguments", - finetuning_args: "FinetuningArguments", - is_trainable: bool = False, - add_valuehead: bool = False, - use_clm=True, -) -> "AutoModelForVision2Seq": - r""" - Loads pretrained model. Must after load_tokenizer. 
- """ - tokenizer = processor.tokenizer - init_kwargs = _get_init_kwargs(model_args) - config = AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs) - patch_config(config, tokenizer, model_args, init_kwargs, is_trainable) - - model = None - if model is None: - init_kwargs["config"] = config - init_kwargs["pretrained_model_name_or_path"] = model_args.model_name_or_path - model: "AutoModelForVision2Seq" = AutoModelForVision2Seq.from_pretrained(**init_kwargs) - patch_model(model, tokenizer, model_args, is_trainable) - register_autoclass(config, model, tokenizer) - - model = init_mm_adapter(model, model_args, finetuning_args, is_trainable, use_clm) - - if not is_trainable: - model.requires_grad_(False) - model.eval() - else: - model.train() - - trainable_params, all_param = count_parameters(model) - if is_trainable: - param_stats = "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( - trainable_params, all_param, 100 * trainable_params / all_param - ) - else: - param_stats = "all params: {:d}".format(all_param) - logger.info(param_stats) - - if model_args.print_param_status: - for name, param in model.named_parameters(): - print( - "name: {}, dtype: {}, device: {}, trainable: {}".format( - name, param.dtype, param.device, param.requires_grad - ) - ) - - return model + return model \ No newline at end of file diff --git a/src/llmtuner/train/sftmm/workflow.py b/src/llmtuner/train/sftmm/workflow.py index 21f4aebf..7afd8f6f 100644 --- a/src/llmtuner/train/sftmm/workflow.py +++ b/src/llmtuner/train/sftmm/workflow.py @@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, List, Optional from ...data import split_dataset, get_mm_dataset from ...extras.misc import get_logits_processor from ...extras.ploting import plot_loss -from ...model import load_tokenizer, load_processor, load_mm_model +from ...model import load_tokenizer, load_processor, load_model from ..utils import create_modelcard_and_push from .metric import ComputeMetrics from .trainer import CustomSeq2SeqTrainer @@ -29,10 +29,7 @@ def run_sft_mm( CHAT_TEMPLATE = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. 
{% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" tokenizer.chat_template = CHAT_TEMPLATE processor.tokenizer = tokenizer - use_clm = True - if "blip" in model_args.model_name_or_path: - use_clm = False - model = load_mm_model(processor, model_args, finetuning_args, training_args.do_train, use_clm=use_clm) + model = load_model(processor.tokenizer, model_args, finetuning_args, training_args.do_train) dataset = get_mm_dataset(processor, model_args, data_args, training_args, stage="sft") if getattr(model, "is_quantized", False) and not training_args.do_train: setattr(model, "_hf_peft_config_loaded", True) # hack here: make model compatible with prediction From c6dd89918feb25fe8c07857162421ad1706f791f Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 19:19:59 +0800 Subject: [PATCH 08/34] merge data part to the text stream --- data/dataset_info.json | 15 +- data/llava_instruct_example.json | 20 ++- examples/mllm/sft_instructblip.sh | 32 ---- scripts/test_mllm.py | 14 +- src/llmtuner/data/__init__.py | 3 +- src/llmtuner/data/aligner.py | 118 +++++++++++-- src/llmtuner/data/loader.py | 103 +++++------ src/llmtuner/data/parser.py | 37 +++- src/llmtuner/data/preprocess.py | 185 +++++++++++++++++--- src/llmtuner/data/template.py | 248 +++++++++++++++++++++------ src/llmtuner/hparams/model_args.py | 115 ++++++++++--- src/llmtuner/model/adapter.py | 100 ++++++++--- src/llmtuner/model/loader.py | 26 ++- src/llmtuner/train/sftmm/collator.py | 15 +- src/llmtuner/train/sftmm/workflow.py | 90 +++++++--- 15 files changed, 828 insertions(+), 293 deletions(-) delete mode 100644 examples/mllm/sft_instructblip.sh diff --git a/data/dataset_info.json b/data/dataset_info.json index 6c7088f6..f9adf108 100644 --- a/data/dataset_info.json +++ b/data/dataset_info.json @@ -418,6 +418,17 @@ "hf_hub_url": "HuggingFaceH4/llava-instruct-mix-vsft" }, "mllm_instruct_example": { - "hf_hub_url": "data/mllm_example_dataset" + "file_name": "llava_instruct_example.json", + "formatting": "llava", + "columns": { + "messages": "messages", + "images": "images" + }, + "tags": { + "role_tag": "role", + "content_tag": "content", + "user_tag": "user", + "assistant_tag": "assistant" + } } -} \ No newline at end of file +} diff --git a/data/llava_instruct_example.json b/data/llava_instruct_example.json index 908b0695..b5c97387 100644 --- a/data/llava_instruct_example.json +++ b/data/llava_instruct_example.json @@ -2,7 +2,7 @@ { "messages": [ { - "content": "Who are they?", + "content": "Who are they?", "role": "user" }, { @@ -18,12 +18,14 @@ "role": "assistant" } ], - "image": "1.jpg" + "images": [ + "data/images/1.jpg" + ] }, { "messages": [ { - "content": "Who is he?", + "content": "Who is he?", "role": "user" }, { @@ -39,12 +41,14 @@ "role": "assistant" } ], - "image": "2.jpg" + "images": [ + "data/images/2.jpg" + ] }, { "messages": [ { - "content": "Please describe this image", + "content": "Please describe this image", "role": "user" }, { @@ -60,6 +64,8 @@ "role": "assistant" } ], - "image": "3.jpg" + "images": [ + "data/images/3.jpg" + ] } -] \ No newline at end of file +] diff --git a/examples/mllm/sft_instructblip.sh b/examples/mllm/sft_instructblip.sh deleted file mode 
100644 index b3923655..00000000 --- a/examples/mllm/sft_instructblip.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ - --stage sft_mm \ - --do_train \ - --model_name_or_path Salesforce/instructblip-vicuna-7b \ - --dataset mllm_instruct_example \ - --dataset_dir data \ - --template default \ - --finetuning_type lora \ - --lora_target all \ - --output_dir saves/instructblip-vicuna-7b/lora/sft \ - --overwrite_cache \ - --overwrite_output_dir \ - --cutoff_len 1024 \ - --preprocessing_num_workers 16 \ - --per_device_train_batch_size 3 \ - --per_device_eval_batch_size 1 \ - --gradient_accumulation_steps 1 \ - --lr_scheduler_type cosine \ - --logging_steps 1 \ - --warmup_steps 20 \ - --save_steps 100 \ - --eval_steps 100 \ - --evaluation_strategy steps \ - --load_best_model_at_end \ - --learning_rate 1e-5 \ - --num_train_epochs 50 \ - --max_samples 3000 \ - --val_size 0.1 \ - --plot_loss \ - --bf16 \ No newline at end of file diff --git a/scripts/test_mllm.py b/scripts/test_mllm.py index 961f02bf..94d8670b 100644 --- a/scripts/test_mllm.py +++ b/scripts/test_mllm.py @@ -29,7 +29,10 @@ def get_processor(model_path): def apply_lora(base_model_path, model_path, lora_path): print(f"Loading the base model from {base_model_path}") base_model = AutoModelForVision2Seq.from_pretrained( - base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="cuda", + base_model_path, + torch_dtype=torch.float16, + low_cpu_mem_usage=True, + device_map="cuda", ) processor = get_processor(base_model_path) tokenizer = processor.tokenizer @@ -60,11 +63,14 @@ def main( if not os.path.exists(model_path) or do_merge: apply_lora(base_model_path, model_path, lora_model_path) model = AutoModelForVision2Seq.from_pretrained( - model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, device_map="cuda" + model_path, + torch_dtype=torch.bfloat16, + low_cpu_mem_usage=True, + device_map="cuda", ) processor = get_processor(model_path) raw_datasets = load_dataset(dataset_name) - train_dataset = raw_datasets['train'] + train_dataset = raw_datasets["train"] examples = train_dataset.select(range(3)) texts = [] images = [] @@ -81,5 +87,5 @@ def main( print(res) -if __name__ == '__main__': +if __name__ == "__main__": fire.Fire(main) diff --git a/src/llmtuner/data/__init__.py b/src/llmtuner/data/__init__.py index 27a2f3b8..00a82d73 100644 --- a/src/llmtuner/data/__init__.py +++ b/src/llmtuner/data/__init__.py @@ -1,12 +1,11 @@ from .collator import PairwiseDataCollatorWithPadding -from .loader import get_dataset, get_mm_dataset +from .loader import get_dataset from .template import Template, get_template_and_fix_tokenizer, templates from .utils import Role, split_dataset __all__ = [ "PairwiseDataCollatorWithPadding", "get_dataset", - "get_mm_dataset", "Template", "get_template_and_fix_tokenizer", "templates", diff --git a/src/llmtuner/data/aligner.py b/src/llmtuner/data/aligner.py index 4de37e6d..85202ea8 100644 --- a/src/llmtuner/data/aligner.py +++ b/src/llmtuner/data/aligner.py @@ -13,7 +13,9 @@ if TYPE_CHECKING: from .parser import DatasetAttr -def convert_alpaca(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: +def convert_alpaca( + examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" +) -> Dict[str, List[Any]]: outputs = {"prompt": [], "response": [], "system": [], "tools": []} for i in range(len(examples[dataset_attr.prompt])): prompt = [] @@ -31,24 +33,38 @@ def convert_alpaca(examples: Dict[str, List[Any]], 
dataset_attr: "DatasetAttr") prompt.append({"role": Role.USER.value, "content": "\n".join(content)}) - if dataset_attr.response and isinstance(examples[dataset_attr.response][i], list): + if dataset_attr.response and isinstance( + examples[dataset_attr.response][i], list + ): response = [ - {"role": Role.ASSISTANT.value, "content": content} for content in examples[dataset_attr.response][i] + {"role": Role.ASSISTANT.value, "content": content} + for content in examples[dataset_attr.response][i] + ] + elif dataset_attr.response and isinstance( + examples[dataset_attr.response][i], str + ): + response = [ + { + "role": Role.ASSISTANT.value, + "content": examples[dataset_attr.response][i], + } ] - elif dataset_attr.response and isinstance(examples[dataset_attr.response][i], str): - response = [{"role": Role.ASSISTANT.value, "content": examples[dataset_attr.response][i]}] else: response = [] outputs["prompt"].append(prompt) outputs["response"].append(response) - outputs["system"].append(examples[dataset_attr.system][i] if dataset_attr.system else "") + outputs["system"].append( + examples[dataset_attr.system][i] if dataset_attr.system else "" + ) outputs["tools"].append("") - + outputs["images"].append([]) return outputs -def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: +def convert_sharegpt( + examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" +) -> Dict[str, List[Any]]: outputs = {"prompt": [], "response": [], "system": [], "tools": []} tag_mapping = { dataset_attr.user_tag: Role.USER.value, @@ -61,7 +77,10 @@ def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" even_tags = (dataset_attr.assistant_tag, dataset_attr.function_tag) accept_tags = (odd_tags, even_tags) for i, messages in enumerate(examples[dataset_attr.messages]): - if dataset_attr.system_tag and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag: + if ( + dataset_attr.system_tag + and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag + ): system = messages[0][dataset_attr.content_tag] messages = messages[1:] else: @@ -77,19 +96,81 @@ def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" raise ValueError("Invalid role tag in {}.".format(messages)) aligned_messages.append( - {"role": tag_mapping[message[dataset_attr.role_tag]], "content": message[dataset_attr.content_tag]} + { + "role": tag_mapping[message[dataset_attr.role_tag]], + "content": message[dataset_attr.content_tag], + } ) outputs["prompt"].append(aligned_messages[:-1]) outputs["response"].append(aligned_messages[-1:]) outputs["system"].append(system) - outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "") + outputs["tools"].append( + examples[dataset_attr.tools][i] if dataset_attr.tools else "" + ) + outputs["images"].append([]) + + return outputs + + +def convert_llava( + examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" +) -> Dict[str, List[Any]]: + outputs = {"prompt": [], "response": [], "system": [], "tools": [], "images": []} + tag_mapping = { + dataset_attr.user_tag: Role.USER.value, + dataset_attr.assistant_tag: Role.ASSISTANT.value, + dataset_attr.observation_tag: Role.OBSERVATION.value, + dataset_attr.function_tag: Role.FUNCTION.value, + dataset_attr.system_tag: Role.SYSTEM.value, + } + odd_tags = (dataset_attr.user_tag, dataset_attr.observation_tag) + even_tags = (dataset_attr.assistant_tag, dataset_attr.function_tag) + accept_tags = (odd_tags, even_tags) + for i, messages in 
enumerate(examples[dataset_attr.messages]): + if ( + dataset_attr.system_tag + and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag + ): + system = messages[0][dataset_attr.content_tag] + messages = messages[1:] + else: + system = examples[dataset_attr.system][i] if dataset_attr.system else "" + + messages = messages[: len(messages) // 2 * 2] # should be multiples of 2 + if len(messages) == 0: + continue + + aligned_messages = [] + for turn_idx, message in enumerate(messages): + if message[dataset_attr.role_tag] not in accept_tags[turn_idx % 2]: + raise ValueError("Invalid role tag in {}.".format(messages)) + + aligned_messages.append( + { + "role": tag_mapping[message[dataset_attr.role_tag]], + "content": message[dataset_attr.content_tag], + } + ) + + outputs["prompt"].append(aligned_messages[:-1]) + outputs["response"].append(aligned_messages[-1:]) + outputs["system"].append(system) + outputs["tools"].append( + examples[dataset_attr.tools][i] if dataset_attr.tools else "" + ) + print(examples[dataset_attr.images][i]) + outputs["images"].append( + examples[dataset_attr.images][i] if dataset_attr.images else [] + ) return outputs def align_dataset( - dataset: Union["Dataset", "IterableDataset"], dataset_attr: "DatasetAttr", data_args: "DataArguments" + dataset: Union["Dataset", "IterableDataset"], + dataset_attr: "DatasetAttr", + data_args: "DataArguments", ) -> Union["Dataset", "IterableDataset"]: r""" Aligned dataset: @@ -100,6 +181,8 @@ def align_dataset( """ if dataset_attr.formatting == "alpaca": convert_func = partial(convert_alpaca, dataset_attr=dataset_attr) + elif dataset_attr.formatting == "llava": + convert_func = partial(convert_llava, dataset_attr=dataset_attr) else: convert_func = partial(convert_sharegpt, dataset_attr=dataset_attr) @@ -107,13 +190,20 @@ def align_dataset( features = Features.from_dict( { "prompt": [ - {"role": {"dtype": "string", "_type": "Value"}, "content": {"dtype": "string", "_type": "Value"}} + { + "role": {"dtype": "string", "_type": "Value"}, + "content": {"dtype": "string", "_type": "Value"}, + } ], "response": [ - {"role": {"dtype": "string", "_type": "Value"}, "content": {"dtype": "string", "_type": "Value"}} + { + "role": {"dtype": "string", "_type": "Value"}, + "content": {"dtype": "string", "_type": "Value"}, + } ], "system": {"dtype": "string", "_type": "Value"}, "tools": {"dtype": "string", "_type": "Value"}, + "images": {"feature": {"_type": "Image"}, "_type": "Sequence"}, } ) kwargs = {} diff --git a/src/llmtuner/data/loader.py b/src/llmtuner/data/loader.py index 18665731..c373e196 100644 --- a/src/llmtuner/data/loader.py +++ b/src/llmtuner/data/loader.py @@ -1,6 +1,6 @@ import inspect import os -from typing import TYPE_CHECKING, Literal, Union +from typing import TYPE_CHECKING, Literal, Union, Optional from datasets import load_dataset, load_from_disk @@ -25,9 +25,9 @@ logger = get_logger(__name__) def load_single_dataset( - dataset_attr: "DatasetAttr", - model_args: "ModelArguments", - data_args: "DataArguments", + dataset_attr: "DatasetAttr", + model_args: "ModelArguments", + data_args: "DataArguments", ) -> Union["Dataset", "IterableDataset"]: logger.info("Loading dataset {}...".format(dataset_attr)) data_path, data_name, data_dir, data_files = None, None, None, None @@ -78,14 +78,20 @@ def load_single_dataset( split=data_args.split, cache_dir=cache_dir, token=model_args.ms_hub_token, - use_streaming=(data_args.streaming and (dataset_attr.load_from != "file")), + use_streaming=( + data_args.streaming and 
(dataset_attr.load_from != "file") + ), ) if isinstance(dataset, MsDataset): dataset = dataset.to_hf_dataset() except ImportError: - raise ImportError("Please install modelscope via `pip install modelscope -U`") + raise ImportError( + "Please install modelscope via `pip install modelscope -U`" + ) else: - if "trust_remote_code" in inspect.signature(load_dataset).parameters: # for datasets==2.16.0 + if ( + "trust_remote_code" in inspect.signature(load_dataset).parameters + ): # for datasets==2.16.0 kwargs = {"trust_remote_code": True} else: kwargs = {} @@ -102,7 +108,9 @@ def load_single_dataset( **kwargs, ) - if data_args.streaming and (dataset_attr.load_from == "file"): # faster than specifying streaming=True + if data_args.streaming and ( + dataset_attr.load_from == "file" + ): # faster than specifying streaming=True dataset = dataset.to_iterable_dataset() # TODO: add num shards parameter if data_args.max_samples is not None: # truncate dataset @@ -113,11 +121,12 @@ def load_single_dataset( def get_dataset( - tokenizer: "PreTrainedTokenizer", - model_args: "ModelArguments", - data_args: "DataArguments", - training_args: "Seq2SeqTrainingArguments", - stage: Literal["pt", "sft", "rm", "ppo"], + tokenizer: "PreTrainedTokenizer", + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + stage: Literal["pt", "sft", "rm", "ppo"], + processor: Optional["AutoProcessor"] = None, ) -> Union["Dataset", "IterableDataset"]: template = get_template_and_fix_tokenizer(tokenizer, data_args.template) if data_args.train_on_prompt and template.efficient_eos: @@ -126,9 +135,13 @@ def get_dataset( # Load tokenized dataset if data_args.tokenized_path is not None: if has_tokenized_data(data_args.tokenized_path): - logger.warning("Loading dataset from disk will ignore other data arguments.") + logger.warning( + "Loading dataset from disk will ignore other data arguments." + ) dataset = load_from_disk(data_args.tokenized_path) - logger.info("Loaded tokenized dataset from {}.".format(data_args.tokenized_path)) + logger.info( + "Loaded tokenized dataset from {}.".format(data_args.tokenized_path) + ) if data_args.streaming: dataset = dataset.to_iterable_dataset() return dataset @@ -139,15 +152,21 @@ def get_dataset( with training_args.main_process_first(desc="load dataset"): all_datasets = [] for dataset_attr in get_dataset_list(data_args): - if (stage == "rm" and dataset_attr.ranking is False) or (stage != "rm" and dataset_attr.ranking is True): - raise ValueError("The dataset is not applicable in the current training stage.") + if (stage == "rm" and dataset_attr.ranking is False) or ( + stage != "rm" and dataset_attr.ranking is True + ): + raise ValueError( + "The dataset is not applicable in the current training stage." 
+ ) - all_datasets.append(load_single_dataset(dataset_attr, model_args, data_args)) + all_datasets.append( + load_single_dataset(dataset_attr, model_args, data_args) + ) dataset = merge_dataset(all_datasets, data_args, training_args) with training_args.main_process_first(desc="pre-process dataset"): preprocess_func, print_function = get_preprocess_and_print_func( - tokenizer, template, data_args, training_args, stage + tokenizer, template, data_args, training_args, stage, processor ) column_names = list(next(iter(dataset)).keys()) kwargs = {} @@ -158,13 +177,21 @@ def get_dataset( desc="Running tokenizer on dataset", ) - dataset = dataset.map(preprocess_func, batched=True, remove_columns=column_names, **kwargs) + dataset = dataset.map( + preprocess_func, batched=True, remove_columns=column_names, **kwargs + ) if data_args.tokenized_path is not None: if training_args.should_save: dataset.save_to_disk(data_args.tokenized_path) - logger.info("Tokenized dataset saved at {}.".format(data_args.tokenized_path)) - logger.info("Please restart the training with `--tokenized_path {}`.".format(data_args.tokenized_path)) + logger.info( + "Tokenized dataset saved at {}.".format(data_args.tokenized_path) + ) + logger.info( + "Please restart the training with `--tokenized_path {}`.".format( + data_args.tokenized_path + ) + ) exit(0) @@ -172,34 +199,8 @@ def get_dataset( try: print_function(next(iter(dataset))) except StopIteration: - raise RuntimeError("Cannot find valid samples, check `data/README.md` for the data format.") + raise RuntimeError( + "Cannot find valid samples, check `data/README.md` for the data format." + ) return dataset - - -def get_mm_dataset( - processor: "AutoProcessor", - model_args: "ModelArguments", - data_args: "DataArguments", - training_args: "Seq2SeqTrainingArguments", - stage: Literal["pt", "sft", "rm", "ppo"], -) -> Union["Dataset", "IterableDataset"]: - if data_args.tokenized_path is not None: - if has_tokenized_data(data_args.tokenized_path): - logger.warning("Loading dataset from disk will ignore other data arguments.") - dataset = load_from_disk(data_args.tokenized_path) - logger.info("Loaded tokenized dataset from {}.".format(data_args.tokenized_path)) - if data_args.streaming: - dataset = dataset.to_iterable_dataset() - return dataset - - if data_args.streaming: - raise ValueError("Turn off `streaming` when saving dataset to disk.") - - with training_args.main_process_first(desc="load dataset"): - all_datasets = [] - for dataset_attr in get_dataset_list(data_args): - all_datasets.append(load_dataset(dataset_attr.dataset_name)['train']) - dataset = merge_dataset(all_datasets, data_args, training_args) - - return dataset diff --git a/src/llmtuner/data/parser.py b/src/llmtuner/data/parser.py index b9c8782a..79d6ed4e 100644 --- a/src/llmtuner/data/parser.py +++ b/src/llmtuner/data/parser.py @@ -25,7 +25,7 @@ class DatasetAttr: subset: Optional[str] = None folder: Optional[str] = None ranking: bool = False - formatting: Literal["alpaca", "sharegpt"] = "alpaca" + formatting: Literal["alpaca", "sharegpt", "llava"] = "alpaca" """ columns """ system: Optional[str] = None """ columns for the alpaca format """ @@ -44,11 +44,15 @@ class DatasetAttr: observation_tag: Optional[str] = "observation" function_tag: Optional[str] = "function_call" system_tag: Optional[str] = "system" + """ columns for the mllm format """ + images: Optional[str] = None def __repr__(self) -> str: return self.dataset_name - def set_attr(self, key: str, obj: Dict[str, Any], default: Optional[Any] = None) -> 
None: + def set_attr( + self, key: str, obj: Dict[str, Any], default: Optional[Any] = None + ) -> None: setattr(self, key, obj.get(key, default)) @@ -67,12 +71,16 @@ def get_dataset_list(data_args: "DataArguments") -> List["DatasetAttr"]: except Exception as err: if len(dataset_names) != 0: raise ValueError( - "Cannot open {} due to {}.".format(os.path.join(data_args.dataset_dir, DATA_CONFIG), str(err)) + "Cannot open {} due to {}.".format( + os.path.join(data_args.dataset_dir, DATA_CONFIG), str(err) + ) ) dataset_info = None if data_args.interleave_probs is not None: - data_args.interleave_probs = [float(prob.strip()) for prob in data_args.interleave_probs.split(",")] + data_args.interleave_probs = [ + float(prob.strip()) for prob in data_args.interleave_probs.split(",") + ] dataset_list: List[DatasetAttr] = [] for name in dataset_names: @@ -90,31 +98,42 @@ def get_dataset_list(data_args: "DataArguments") -> List["DatasetAttr"]: if has_hf_url or has_ms_url: if (use_modelscope() and has_ms_url) or (not has_hf_url): - dataset_attr = DatasetAttr("ms_hub", dataset_name=dataset_info[name]["ms_hub_url"]) + dataset_attr = DatasetAttr( + "ms_hub", dataset_name=dataset_info[name]["ms_hub_url"] + ) else: - dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"]) + dataset_attr = DatasetAttr( + "hf_hub", dataset_name=dataset_info[name]["hf_hub_url"] + ) elif "script_url" in dataset_info[name]: - dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"]) + dataset_attr = DatasetAttr( + "script", dataset_name=dataset_info[name]["script_url"] + ) else: - dataset_attr = DatasetAttr("file", dataset_name=dataset_info[name]["file_name"]) + dataset_attr = DatasetAttr( + "file", dataset_name=dataset_info[name]["file_name"] + ) dataset_attr.set_attr("file_sha1", dataset_info[name]) dataset_attr.set_attr("subset", dataset_info[name]) dataset_attr.set_attr("folder", dataset_info[name]) dataset_attr.set_attr("ranking", dataset_info[name], default=False) dataset_attr.set_attr("formatting", dataset_info[name], default="alpaca") + dataset_attr.set_attr("images", dataset_info[name], default="") if "columns" in dataset_info[name]: column_names = ["system"] if dataset_attr.formatting == "alpaca": column_names.extend(["prompt", "query", "response", "history"]) + elif dataset_attr.formatting == "llava": + column_names.extend(["messages", "images"]) else: column_names.extend(["messages", "tools"]) for column_name in column_names: dataset_attr.set_attr(column_name, dataset_info[name]["columns"]) - if dataset_attr.formatting == "sharegpt" and "tags" in dataset_info[name]: + if dataset_attr.formatting != "alpaca" and "tags" in dataset_info[name]: tag_names = ( "role_tag", "content_tag", diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index 8494ba7e..dc72483f 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -1,6 +1,6 @@ from functools import partial from itertools import chain -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Tuple +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Tuple, Optional from ..extras.constants import IGNORE_INDEX from ..extras.logging import get_logger @@ -9,7 +9,7 @@ from .utils import Role if TYPE_CHECKING: from transformers import Seq2SeqTrainingArguments - from transformers.tokenization_utils import PreTrainedTokenizer + from transformers.tokenization_utils import PreTrainedTokenizer, AutoProcessor from ..hparams import DataArguments 
from .template import Template @@ -19,19 +19,27 @@ logger = get_logger(__name__) def preprocess_pretrain_dataset( - examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", data_args: "DataArguments" + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build grouped texts with format `X1 X2 X3 ...` if packing is enabled - text_examples = [messages[0]["content"] + tokenizer.eos_token for messages in examples["prompt"]] + text_examples = [ + messages[0]["content"] + tokenizer.eos_token for messages in examples["prompt"] + ] if not data_args.packing: if data_args.template == "gemma": text_examples = [tokenizer.bos_token + example for example in text_examples] - result = tokenizer(text_examples, add_special_tokens=False, max_length=data_args.cutoff_len) + result = tokenizer( + text_examples, add_special_tokens=False, max_length=data_args.cutoff_len + ) else: tokenized_examples = tokenizer(text_examples, add_special_tokens=False) - concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()} + concatenated_examples = { + k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys() + } total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]]) block_size = data_args.cutoff_len total_length = (total_length // block_size) * block_size @@ -54,7 +62,11 @@ def preprocess_supervised_dataset( ) -> Dict[str, List[List[int]]]: # build inputs with format ` X Y ` and labels with format ` ... Y ` # for multiturn examples, we only mask the prompt part in each prompt-response pair. - model_inputs = {"input_ids": [], "attention_mask": [], "labels": []} + model_inputs = { + "input_ids": [], + "attention_mask": [], + "labels": [], + } for i in range(len(examples["prompt"])): if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1: @@ -75,7 +87,9 @@ def preprocess_supervised_dataset( if data_args.train_on_prompt: source_mask = source_ids elif turn_idx != 0 and template.efficient_eos: - source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1) + source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * ( + len(source_ids) - 1 + ) else: source_mask = [IGNORE_INDEX] * len(source_ids) @@ -114,7 +128,9 @@ def preprocess_packed_supervised_dataset( if data_args.train_on_prompt: source_mask = source_ids elif len(input_ids) != 0 and template.efficient_eos: - source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1) + source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * ( + len(source_ids) - 1 + ) else: source_mask = [IGNORE_INDEX] * len(source_ids) @@ -139,6 +155,64 @@ def preprocess_packed_supervised_dataset( return model_inputs +def preprocess_multimodal_supervised_dataset( + examples: Dict[str, List[Any]], + processor: "AutoProcessor", + template: "Template", + data_args: "DataArguments", +) -> Dict[str, List[List[int]]]: + # build inputs with format ` X Y ` and labels with format ` ... Y ` + # for multiturn examples, we only mask the prompt part in each prompt-response pair. 
+ tokenizer = processor.tokenizer + model_inputs = { + "input_ids": [], + "attention_mask": [], + "labels": [], + "pixel_values": [], + } + + for i in range(len(examples["prompt"])): + if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1: + continue + + messages = examples["prompt"][i] + examples["response"][i] + input_ids, labels = [], [] + for turn_idx, (source_ids, target_ids) in enumerate( + template.encode_multiturn( + tokenizer, + messages, + examples["system"][i], + examples["tools"][i], + data_args.cutoff_len, + data_args.reserved_label_len, + ) + ): + if data_args.train_on_prompt: + source_mask = source_ids + elif turn_idx != 0 and template.efficient_eos: + source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * ( + len(source_ids) - 1 + ) + else: + source_mask = [IGNORE_INDEX] * len(source_ids) + + input_ids += source_ids + target_ids + labels += source_mask + target_ids + + if template.efficient_eos: + input_ids += [tokenizer.eos_token_id] + labels += [tokenizer.eos_token_id] + + model_inputs["input_ids"].append(input_ids) + model_inputs["attention_mask"].append([1] * len(input_ids)) + model_inputs["labels"].append(labels) + pixel_values = processor.image_processor( + examples["images"][0], return_tensors="pt" + )["pixel_values"][0] + model_inputs["pixel_values"].append(pixel_values) + return model_inputs + + def preprocess_unsupervised_dataset( examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", @@ -155,7 +229,9 @@ def preprocess_unsupervised_dataset( if len(examples["response"][i]) == 1: messages = examples["prompt"][i] + examples["response"][i] else: - messages = examples["prompt"][i] + [{"role": Role.ASSISTANT.value, "content": ""}] + messages = examples["prompt"][i] + [ + {"role": Role.ASSISTANT.value, "content": ""} + ] input_ids, labels = template.encode_oneturn( tokenizer, @@ -218,29 +294,58 @@ def preprocess_pairwise_dataset( return model_inputs -def print_supervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None: +def print_supervised_dataset_example( + example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer" +) -> None: print("input_ids:\n{}".format(example["input_ids"])) - print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False))) + print( + "inputs:\n{}".format( + tokenizer.decode(example["input_ids"], skip_special_tokens=False) + ) + ) print("label_ids:\n{}".format(example["labels"])) print( "labels:\n{}".format( - tokenizer.decode(list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), skip_special_tokens=False) + tokenizer.decode( + list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), + skip_special_tokens=False, + ) ) ) -def print_pairwise_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None: +def print_pairwise_dataset_example( + example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer" +) -> None: print("prompt_ids:\n{}".format(example["prompt_ids"])) - print("prompt:\n{}".format(tokenizer.decode(example["prompt_ids"], skip_special_tokens=False))) + print( + "prompt:\n{}".format( + tokenizer.decode(example["prompt_ids"], skip_special_tokens=False) + ) + ) print("chosen_ids:\n{}".format(example["chosen_ids"])) - print("chosen:\n{}".format(tokenizer.decode(example["chosen_ids"], skip_special_tokens=False))) + print( + "chosen:\n{}".format( + tokenizer.decode(example["chosen_ids"], skip_special_tokens=False) + ) + ) print("rejected_ids:\n{}".format(example["rejected_ids"])) - 
print("rejected:\n{}".format(tokenizer.decode(example["rejected_ids"], skip_special_tokens=False))) + print( + "rejected:\n{}".format( + tokenizer.decode(example["rejected_ids"], skip_special_tokens=False) + ) + ) -def print_unsupervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None: +def print_unsupervised_dataset_example( + example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer" +) -> None: print("input_ids:\n{}".format(example["input_ids"])) - print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False))) + print( + "inputs:\n{}".format( + tokenizer.decode(example["input_ids"], skip_special_tokens=False) + ) + ) def get_preprocess_and_print_func( @@ -249,30 +354,56 @@ def get_preprocess_and_print_func( data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments", stage: Literal["pt", "sft", "rm", "ppo"], + processor: Optional["AutoProcessor"] = None, ) -> Tuple[Callable, Callable]: if stage == "pt": - preprocess_func = partial(preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args) - print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) + preprocess_func = partial( + preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args + ) + print_function = partial( + print_unsupervised_dataset_example, tokenizer=tokenizer + ) elif stage == "sft" and not training_args.predict_with_generate: if data_args.packing: preprocess_func = partial( - preprocess_packed_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args + preprocess_packed_supervised_dataset, + tokenizer=tokenizer, + template=template, + data_args=data_args, + ) + elif processor is not None: + preprocess_func = partial( + preprocess_multimodal_supervised_dataset, + processor=processor, + template=template, + data_args=data_args, ) else: preprocess_func = partial( - preprocess_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args + preprocess_supervised_dataset, + tokenizer=tokenizer, + template=template, + data_args=data_args, ) print_function = partial(print_supervised_dataset_example, tokenizer=tokenizer) elif stage == "rm": preprocess_func = partial( - preprocess_pairwise_dataset, tokenizer=tokenizer, template=template, data_args=data_args + preprocess_pairwise_dataset, + tokenizer=tokenizer, + template=template, + data_args=data_args, ) print_function = partial(print_pairwise_dataset_example, tokenizer=tokenizer) else: preprocess_func = partial( - preprocess_unsupervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args + preprocess_unsupervised_dataset, + tokenizer=tokenizer, + template=template, + data_args=data_args, + ) + print_function = partial( + print_unsupervised_dataset_example, tokenizer=tokenizer ) - print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) - return preprocess_func, print_function \ No newline at end of file + return preprocess_func, print_function diff --git a/src/llmtuner/data/template.py b/src/llmtuner/data/template.py index 73b22eb7..311660aa 100644 --- a/src/llmtuner/data/template.py +++ b/src/llmtuner/data/template.py @@ -42,7 +42,9 @@ class Template: r""" Returns a single pair of token ids representing prompt and response respectively. 
""" - encoded_pairs = self._encode(tokenizer, messages, system, tools, cutoff_len, reserved_label_len) + encoded_pairs = self._encode( + tokenizer, messages, system, tools, cutoff_len, reserved_label_len + ) prompt_ids = [] for query_ids, resp_ids in encoded_pairs[:-1]: prompt_ids += query_ids + resp_ids @@ -62,7 +64,9 @@ class Template: r""" Returns multiple pairs of token ids representing prompts and responses respectively. """ - return self._encode(tokenizer, messages, system, tools, cutoff_len, reserved_label_len) + return self._encode( + tokenizer, messages, system, tools, cutoff_len, reserved_label_len + ) def _encode( self, @@ -89,7 +93,9 @@ class Template: elements += self.format_separator.apply() if message["role"] == Role.USER.value: - elements += self.format_user.apply(content=message["content"], idx=str(i // 2)) + elements += self.format_user.apply( + content=message["content"], idx=str(i // 2) + ) elif message["role"] == Role.ASSISTANT.value: elements += self.format_assistant.apply(content=message["content"]) elif message["role"] == Role.OBSERVATION.value: @@ -104,7 +110,9 @@ class Template: return self._make_pairs(encoded_messages, cutoff_len, reserved_label_len) def _convert_elements_to_ids( - self, tokenizer: "PreTrainedTokenizer", elements: List[Union[str, Dict[str, str]]] + self, + tokenizer: "PreTrainedTokenizer", + elements: List[Union[str, Dict[str, str]]], ) -> List[int]: r""" Converts elements to token ids. @@ -122,7 +130,11 @@ class Template: elif "eos_token" in elem and tokenizer.eos_token_id is not None: token_ids += [tokenizer.eos_token_id] else: - raise ValueError("Input must be string, set[str] or dict[str, str], got {}".format(type(elem))) + raise ValueError( + "Input must be string, set[str] or dict[str, str], got {}".format( + type(elem) + ) + ) return token_ids @@ -180,7 +192,9 @@ class Llama2Template(Template): elements += self.format_separator.apply() if message["role"] == Role.USER.value: - elements += self.format_user.apply(content=system_text + message["content"]) + elements += self.format_user.apply( + content=system_text + message["content"] + ) elif message["role"] == Role.ASSISTANT.value: elements += self.format_assistant.apply(content=message["content"]) elif message["role"] == Role.OBSERVATION.value: @@ -243,7 +257,9 @@ def _register_template( template_class = Llama2Template if name.startswith("llama2") else Template default_user_formatter = StringFormatter(slots=["{{content}}"]) default_assistant_formatter = StringFormatter(slots=["{{content}}"] + eos_slots) - default_function_formatter = FunctionFormatter(slots=["Action: {{name}}\nAction Input: {{arguments}}"] + eos_slots) + default_function_formatter = FunctionFormatter( + slots=["Action: {{name}}\nAction Input: {{arguments}}"] + eos_slots + ) default_tool_formatter = ToolFormatter(tool_format="default") default_separator_formatter = EmptyFormatter() templates[name] = template_class( @@ -279,7 +295,9 @@ def _jinja_escape(content: str) -> str: return content.replace("\n", r"\n").replace("'", r"\'") -def _convert_slots_to_jinja(slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content") -> str: +def _convert_slots_to_jinja( + slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content" +) -> str: slot_items = [] for slot in slots: if isinstance(slot, str): @@ -293,7 +311,9 @@ def _convert_slots_to_jinja(slots: "SLOTS", tokenizer: "PreTrainedTokenizer", pl elif isinstance(slot, set): if "bos_token" in slot: slot_items.append("'" + tokenizer.bos_token + "'") - 
elif "eos_token" in slot: # do not use {{ eos_token }} since it may be replaced + elif ( + "eos_token" in slot + ): # do not use {{ eos_token }} since it may be replaced slot_items.append("'" + tokenizer.eos_token + "'") elif isinstance(slot, dict): raise ValueError("Dict is not supported.") @@ -305,25 +325,37 @@ def _get_jinja_template(template: "Template", tokenizer: "PreTrainedTokenizer") jinja_template = "" if template.default_system: - jinja_template += "{% set system_message = '" + _jinja_escape(template.default_system) + "' %}" + jinja_template += ( + "{% set system_message = '" + + _jinja_escape(template.default_system) + + "' %}" + ) jinja_template += ( - "{% if messages[0]['role'] == 'system' %}" "{% set system_message = messages[0]['content'] %}" "{% endif %}" + "{% if messages[0]['role'] == 'system' %}" + "{% set system_message = messages[0]['content'] %}" + "{% endif %}" ) - system_message = _convert_slots_to_jinja(template.format_system.apply(), tokenizer, placeholder="system_message") + system_message = _convert_slots_to_jinja( + template.format_system.apply(), tokenizer, placeholder="system_message" + ) if isinstance(template, Llama2Template): pass elif template.force_system: jinja_template += "{{ " + system_message + " }}" else: - jinja_template += "{% if system_message is defined %}{{ " + system_message + " }}{% endif %}" + jinja_template += ( + "{% if system_message is defined %}{{ " + system_message + " }}{% endif %}" + ) jinja_template += "{% for message in messages %}" jinja_template += "{% set content = message['content'] %}" if isinstance(template, Llama2Template): jinja_template += "{% if loop.index0 == 0 and system_message is defined %}" - jinja_template += "{% set content = " + system_message + " + message['content'] %}" + jinja_template += ( + "{% set content = " + system_message + " + message['content'] %}" + ) jinja_template += "{% endif %}" jinja_template += "{% if message['role'] == 'user' %}" user_message = _convert_slots_to_jinja(template.format_user.apply(), tokenizer) @@ -366,11 +398,14 @@ def get_template_and_fix_tokenizer( if stop_words: num_added_tokens = tokenizer.add_special_tokens( - dict(additional_special_tokens=stop_words), replace_additional_special_tokens=False + dict(additional_special_tokens=stop_words), + replace_additional_special_tokens=False, ) logger.info("Add {} to stop words.".format(",".join(stop_words))) if num_added_tokens > 0: - logger.warning("New tokens have been added, make sure `resize_vocab` is True.") + logger.warning( + "New tokens have been added, make sure `resize_vocab` is True." + ) try: tokenizer.chat_template = _get_jinja_template(template, tokenizer) @@ -382,7 +417,9 @@ def get_template_and_fix_tokenizer( _register_template( name="alpaca", - format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n\n### Response:\n"]), + format_user=StringFormatter( + slots=["### Instruction:\n{{content}}\n\n### Response:\n"] + ), format_separator=EmptyFormatter(slots=["\n\n"]), default_system=( "Below is an instruction that describes a task. 
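_get_jinja_template above assembles a Jinja string that is later assigned to tokenizer.chat_template. The snippet below renders a comparable, hand-written and much simplified template with jinja2 directly, only to show what such a generated string does at apply_chat_template time:

from jinja2 import Template

chat_template = (
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}USER: {{ message['content'] }} "
    "{% else %}ASSISTANT: {{ message['content'] }}{{ eos_token }}{% endif %}"
    "{% endfor %}"
)

rendered = Template(chat_template).render(
    messages=[
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there"},
    ],
    eos_token="</s>",
)
print(rendered)  # USER: Hello ASSISTANT: Hi there</s>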
" @@ -407,7 +444,13 @@ _register_template( _register_template( name="atom", format_user=StringFormatter( - slots=[{"bos_token"}, "Human: {{content}}\n", {"eos_token"}, {"bos_token"}, "Assistant:"] + slots=[ + {"bos_token"}, + "Human: {{content}}\n", + {"eos_token"}, + {"bos_token"}, + "Assistant:", + ] ), format_assistant=StringFormatter(slots=["{{content}}\n", {"eos_token"}]), ) @@ -415,7 +458,9 @@ _register_template( _register_template( name="baichuan", - format_user=StringFormatter(slots=[{"token": ""}, "{{content}}", {"token": ""}]), + format_user=StringFormatter( + slots=[{"token": ""}, "{{content}}", {"token": ""}] + ), efficient_eos=True, ) @@ -438,7 +483,9 @@ _register_template( _register_template( name="bluelm", - format_user=StringFormatter(slots=[{"token": "[|Human|]:"}, "{{content}}", {"token": "[|AI|]:"}]), + format_user=StringFormatter( + slots=[{"token": "[|Human|]:"}, "{{content}}", {"token": "[|AI|]:"}] + ), ) @@ -457,7 +504,9 @@ _register_template( _register_template( name="chatglm2", format_user=StringFormatter(slots=["[Round {{idx}}]\n\n问:{{content}}\n\n答:"]), - format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), + format_system=StringFormatter( + slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"] + ), format_separator=EmptyFormatter(slots=["\n\n"]), efficient_eos=True, force_system=True, @@ -466,12 +515,21 @@ _register_template( _register_template( name="chatglm3", - format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), + format_user=StringFormatter( + slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] + ), format_assistant=StringFormatter(slots=["\n", "{{content}}"]), - format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), + format_system=StringFormatter( + slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"] + ), format_function=FunctionFormatter(slots=["{{name}}\n{{arguments}}"]), format_observation=StringFormatter( - slots=[{"token": "<|observation|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] + slots=[ + {"token": "<|observation|>"}, + "\n", + "{{content}}", + {"token": "<|assistant|>"}, + ] ), stop_words=["<|user|>", "<|observation|>"], efficient_eos=True, @@ -481,14 +539,27 @@ _register_template( _register_template( name="chatglm3_system", - format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), + format_user=StringFormatter( + slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] + ), format_assistant=StringFormatter(slots=["\n", "{{content}}"]), format_system=StringFormatter( - slots=[{"token": "[gMASK]"}, {"token": "sop"}, {"token": "<|system|>"}, "\n", "{{content}}"] + slots=[ + {"token": "[gMASK]"}, + {"token": "sop"}, + {"token": "<|system|>"}, + "\n", + "{{content}}", + ] ), format_function=FunctionFormatter(slots=["{{name}}\n{{arguments}}"]), format_observation=StringFormatter( - slots=[{"token": "<|observation|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] + slots=[ + {"token": "<|observation|>"}, + "\n", + "{{content}}", + {"token": "<|assistant|>"}, + ] ), default_system=( "You are ChatGLM3, a large language model trained by Zhipu.AI. 
" @@ -501,9 +572,15 @@ _register_template( _register_template( name="chatml", - format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), - format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), - format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_user=StringFormatter( + slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), + format_system=StringFormatter( + slots=["<|im_start|>system\n{{content}}<|im_end|>\n"] + ), + format_observation=StringFormatter( + slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), format_separator=EmptyFormatter(slots=["\n"]), stop_words=["<|im_end|>", "<|im_start|>"], replace_eos=True, @@ -512,9 +589,15 @@ _register_template( _register_template( name="chatml_de", - format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), - format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), - format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_user=StringFormatter( + slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), + format_system=StringFormatter( + slots=["<|im_start|>system\n{{content}}<|im_end|>\n"] + ), + format_observation=StringFormatter( + slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), format_separator=EmptyFormatter(slots=["\n"]), default_system="Du bist ein freundlicher und hilfsbereiter KI-Assistent.", stop_words=["<|im_end|>", "<|im_start|>"], @@ -524,7 +607,9 @@ _register_template( _register_template( name="codegeex2", - format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), + format_system=StringFormatter( + slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"] + ), force_system=True, ) @@ -554,9 +639,15 @@ _register_template( _register_template( name="dbrx", - format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), - format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), - format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_user=StringFormatter( + slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), + format_system=StringFormatter( + slots=["<|im_start|>system\n{{content}}<|im_end|>\n"] + ), + format_observation=StringFormatter( + slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), format_separator=EmptyFormatter(slots=["\n"]), default_system=( "You are DBRX, created by Databricks. You were last updated in December 2023. 
" @@ -634,7 +725,9 @@ _register_template( _register_template( name="gemma", - format_user=StringFormatter(slots=["user\n{{content}}\nmodel\n"]), + format_user=StringFormatter( + slots=["user\n{{content}}\nmodel\n"] + ), format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]), format_observation=StringFormatter( slots=["tool\n{{content}}\nmodel\n"] @@ -647,7 +740,9 @@ _register_template( _register_template( name="intern", - format_user=StringFormatter(slots=["<|User|>:{{content}}", {"token": ""}, "\n<|Bot|>:"]), + format_user=StringFormatter( + slots=["<|User|>:{{content}}", {"token": ""}, "\n<|Bot|>:"] + ), format_separator=EmptyFormatter(slots=[{"token": ""}, "\n"]), stop_words=[""], efficient_eos=True, @@ -656,8 +751,12 @@ _register_template( _register_template( name="intern2", - format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), - format_system=StringFormatter(slots=[{"bos_token"}, "<|im_start|>system\n{{content}}<|im_end|>\n"]), + format_user=StringFormatter( + slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), + format_system=StringFormatter( + slots=[{"bos_token"}, "<|im_start|>system\n{{content}}<|im_end|>\n"] + ), format_separator=EmptyFormatter(slots=["\n"]), default_system=( "You are an AI assistant whose name is InternLM (书生·浦语).\n" @@ -707,7 +806,10 @@ _register_template( ] ), format_system=StringFormatter( - slots=[{"bos_token"}, "<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>"] + slots=[ + {"bos_token"}, + "<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>", + ] ), format_observation=StringFormatter( slots=[ @@ -742,7 +844,13 @@ _register_template( _register_template( name="openchat", - format_user=StringFormatter(slots=["GPT4 Correct User: {{content}}", {"eos_token"}, "GPT4 Correct Assistant:"]), + format_user=StringFormatter( + slots=[ + "GPT4 Correct User: {{content}}", + {"eos_token"}, + "GPT4 Correct Assistant:", + ] + ), format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}]), format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]), force_system=True, @@ -751,7 +859,9 @@ _register_template( _register_template( name="orion", - format_user=StringFormatter(slots=["Human: {{content}}\n\nAssistant: ", {"eos_token"}]), + format_user=StringFormatter( + slots=["Human: {{content}}\n\nAssistant: ", {"eos_token"}] + ), format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]), force_system=True, ) @@ -759,9 +869,15 @@ _register_template( _register_template( name="phi", - format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>\n"]), - format_system=StringFormatter(slots=[{"bos_token"}, "<|system|>\n{{content}}<|end|>\n"]), - format_observation=StringFormatter(slots=["<|function_output|>\n{{content}}<|end|>\n<|assistant|>\n"]), + format_user=StringFormatter( + slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>\n"] + ), + format_system=StringFormatter( + slots=[{"bos_token"}, "<|system|>\n{{content}}<|end|>\n"] + ), + format_observation=StringFormatter( + slots=["<|function_output|>\n{{content}}<|end|>\n<|assistant|>\n"] + ), format_separator=EmptyFormatter(slots=["\n"]), default_system="You are a helpful AI assistant.", stop_words=["<|end|>"], @@ -771,9 +887,15 @@ _register_template( _register_template( name="qwen", - format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), - 
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), - format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_user=StringFormatter( + slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), + format_system=StringFormatter( + slots=["<|im_start|>system\n{{content}}<|im_end|>\n"] + ), + format_observation=StringFormatter( + slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), format_separator=EmptyFormatter(slots=["\n"]), default_system="You are a helpful assistant.", stop_words=["<|im_end|>"], @@ -829,8 +951,12 @@ _register_template( _register_template( name="yayi", - format_user=StringFormatter(slots=[{"token": "<|Human|>"}, ":\n{{content}}\n\n", {"token": "<|YaYi|>"}, ":"]), - format_system=StringFormatter(slots=[{"token": "<|System|>"}, ":\n{{content}}\n\n"]), + format_user=StringFormatter( + slots=[{"token": "<|Human|>"}, ":\n{{content}}\n\n", {"token": "<|YaYi|>"}, ":"] + ), + format_system=StringFormatter( + slots=[{"token": "<|System|>"}, ":\n{{content}}\n\n"] + ), format_separator=EmptyFormatter(slots=["\n\n"]), default_system=( "You are a helpful, respectful and honest assistant named YaYi " @@ -849,7 +975,9 @@ _register_template( _register_template( name="yi", - format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_user=StringFormatter( + slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] + ), format_separator=EmptyFormatter(slots=["\n"]), stop_words=["<|im_end|>"], replace_eos=True, @@ -867,7 +995,9 @@ _register_template( _register_template( name="zephyr", - format_user=StringFormatter(slots=["<|user|>\n{{content}}", {"eos_token"}, "<|assistant|>"]), + format_user=StringFormatter( + slots=["<|user|>\n{{content}}", {"eos_token"}, "<|assistant|>"] + ), format_assistant=StringFormatter(slots=["\n{{content}}", {"eos_token"}]), format_system=StringFormatter(slots=["<|system|>\n{{content}}", {"eos_token"}]), default_system="You are a friendly chatbot who always responds in the style of a pirate", @@ -879,3 +1009,13 @@ _register_template( format_user=StringFormatter(slots=[":{{content}}\n:"]), format_separator=EmptyFormatter(slots=["\n"]), ) + +_register_template( + name="llava", + format_user=StringFormatter(slots=["USER: {{content}} "]), + format_assistant=StringFormatter(slots=["ASSISTANT: {{content}}"]), + default_system=( + "A chat between a curious user and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the user's questions." + ), +) diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py index a6e4b710..63fc7f02 100644 --- a/src/llmtuner/hparams/model_args.py +++ b/src/llmtuner/hparams/model_args.py @@ -15,23 +15,33 @@ class ModelArguments: ) adapter_name_or_path: Optional[str] = field( default=None, - metadata={"help": "Path to the adapter weight or identifier from huggingface.co/models."}, + metadata={ + "help": "Path to the adapter weight or identifier from huggingface.co/models." + }, ) cache_dir: Optional[str] = field( default=None, - metadata={"help": "Where to store the pre-trained models downloaded from huggingface.co or modelscope.cn."}, + metadata={ + "help": "Where to store the pre-trained models downloaded from huggingface.co or modelscope.cn." 
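The new "llava" template registered above formats each user turn as "USER: {content} " and each assistant turn as "ASSISTANT: {content}", with the default system prompt in front. A plain-string rendering of that layout; the exact spacing and system handling in the real encoder differ, so treat this purely as an illustration:

def render_llava_prompt(messages, system):
    # Follows the slot layout of the template above; not the actual encoder.
    text = system + " " if system else ""
    for message in messages:
        if message["role"] == "user":
            text += "USER: " + message["content"] + " "
        else:
            text += "ASSISTANT: " + message["content"]
    return text

print(render_llava_prompt(
    [{"role": "user", "content": "What is in the image?"},
     {"role": "assistant", "content": "A white pickup truck."}],
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the user's questions.",
))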
+ }, ) use_fast_tokenizer: bool = field( default=True, - metadata={"help": "Whether or not to use one of the fast tokenizer (backed by the tokenizers library)."}, + metadata={ + "help": "Whether or not to use one of the fast tokenizer (backed by the tokenizers library)." + }, ) resize_vocab: bool = field( default=False, - metadata={"help": "Whether or not to resize the tokenizer vocab and the embedding layers."}, + metadata={ + "help": "Whether or not to resize the tokenizer vocab and the embedding layers." + }, ) split_special_tokens: bool = field( default=False, - metadata={"help": "Whether or not the special tokens should be split during the tokenization process."}, + metadata={ + "help": "Whether or not the special tokens should be split during the tokenization process." + }, ) new_special_tokens: Optional[str] = field( default=None, @@ -39,7 +49,9 @@ class ModelArguments: ) model_revision: str = field( default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, + metadata={ + "help": "The specific model version to use (can be a branch name, tag name or commit id)." + }, ) low_cpu_mem_usage: bool = field( default=True, @@ -47,7 +59,9 @@ class ModelArguments: ) quantization_bit: Optional[int] = field( default=None, - metadata={"help": "The number of bits to quantize the model using bitsandbytes."}, + metadata={ + "help": "The number of bits to quantize the model using bitsandbytes." + }, ) quantization_type: Literal["fp4", "nf4"] = field( default="nf4", @@ -55,15 +69,21 @@ class ModelArguments: ) double_quantization: bool = field( default=True, - metadata={"help": "Whether or not to use double quantization in int4 training."}, + metadata={ + "help": "Whether or not to use double quantization in int4 training." + }, ) quantization_device_map: Optional[Literal["auto"]] = field( default=None, - metadata={"help": "Device map used to infer the 4-bit quantized model, needs bitsandbytes>=0.43.0."}, + metadata={ + "help": "Device map used to infer the 4-bit quantized model, needs bitsandbytes>=0.43.0." + }, ) rope_scaling: Optional[Literal["linear", "dynamic"]] = field( default=None, - metadata={"help": "Which scaling strategy should be adopted for the RoPE embeddings."}, + metadata={ + "help": "Which scaling strategy should be adopted for the RoPE embeddings." + }, ) flash_attn: Literal["off", "sdpa", "fa2", "auto"] = field( default="auto", @@ -71,19 +91,27 @@ class ModelArguments: ) shift_attn: bool = field( default=False, - metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."}, + metadata={ + "help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA." + }, ) mixture_of_depths: Optional[Literal["convert", "load"]] = field( default=None, - metadata={"help": "Convert the model to mixture-of-depths (MoD) or load the MoD model."}, + metadata={ + "help": "Convert the model to mixture-of-depths (MoD) or load the MoD model." + }, ) use_unsloth: bool = field( default=False, - metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."}, + metadata={ + "help": "Whether or not to use unsloth's optimization for the LoRA training." + }, ) moe_aux_loss_coef: Optional[float] = field( default=None, - metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."}, + metadata={ + "help": "Coefficient of the auxiliary router loss in mixture-of-experts model." 
+ }, ) disable_gradient_checkpointing: bool = field( default=False, @@ -107,7 +135,9 @@ class ModelArguments: ) vllm_gpu_util: float = field( default=0.9, - metadata={"help": "The fraction of GPU memory in (0,1) to be used for the vLLM engine."}, + metadata={ + "help": "The fraction of GPU memory in (0,1) to be used for the vLLM engine." + }, ) vllm_enforce_eager: bool = field( default=False, @@ -147,7 +177,9 @@ class ModelArguments: ) export_quantization_dataset: Optional[str] = field( default=None, - metadata={"help": "Path to the dataset or dataset name to use in quantizing the exported model."}, + metadata={ + "help": "Path to the dataset or dataset name to use in quantizing the exported model." + }, ) export_quantization_nsamples: int = field( default=128, @@ -155,19 +187,27 @@ class ModelArguments: ) export_quantization_maxlen: int = field( default=1024, - metadata={"help": "The maximum length of the model inputs used for quantization."}, + metadata={ + "help": "The maximum length of the model inputs used for quantization." + }, ) export_legacy_format: bool = field( default=False, - metadata={"help": "Whether or not to save the `.bin` files instead of `.safetensors`."}, + metadata={ + "help": "Whether or not to save the `.bin` files instead of `.safetensors`." + }, ) export_hub_model_id: Optional[str] = field( default=None, - metadata={"help": "The name of the repository if push the model to the Hugging Face hub."}, + metadata={ + "help": "The name of the repository if push the model to the Hugging Face hub." + }, ) print_param_status: bool = field( default=False, - metadata={"help": "For debugging purposes, print the status of the parameters in the model."}, + metadata={ + "help": "For debugging purposes, print the status of the parameters in the model." + }, ) use_mllm: bool = field( default=False, @@ -180,18 +220,39 @@ class ModelArguments: self.model_max_length = None if self.split_special_tokens and self.use_fast_tokenizer: - raise ValueError("`split_special_tokens` is only supported for slow tokenizers.") + raise ValueError( + "`split_special_tokens` is only supported for slow tokenizers." + ) - if self.adapter_name_or_path is not None: # support merging multiple lora weights - self.adapter_name_or_path = [path.strip() for path in self.adapter_name_or_path.split(",")] + if ( + self.adapter_name_or_path is not None + ): # support merging multiple lora weights + self.adapter_name_or_path = [ + path.strip() for path in self.adapter_name_or_path.split(",") + ] if self.new_special_tokens is not None: # support multiple special tokens - self.new_special_tokens = [token.strip() for token in self.new_special_tokens.split(",")] + self.new_special_tokens = [ + token.strip() for token in self.new_special_tokens.split(",") + ] - assert self.quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization." - assert self.export_quantization_bit in [None, 8, 4, 3, 2], "We only accept 2/3/4/8-bit quantization." + assert self.quantization_bit in [ + None, + 8, + 4, + ], "We only accept 4-bit or 8-bit quantization." + assert self.export_quantization_bit in [ + None, + 8, + 4, + 3, + 2, + ], "We only accept 2/3/4/8-bit quantization." 
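__post_init__ above normalizes comma-separated CLI strings into lists and validates the quantization settings. A cut-down stand-in dataclass showing just that behavior, with the field set reduced for brevity:

from dataclasses import dataclass
from typing import List, Optional, Union

@dataclass
class ToyModelArguments:
    adapter_name_or_path: Optional[Union[str, List[str]]] = None
    quantization_bit: Optional[int] = None

    def __post_init__(self):
        if self.adapter_name_or_path is not None:  # support merging multiple lora weights
            self.adapter_name_or_path = [path.strip() for path in self.adapter_name_or_path.split(",")]
        assert self.quantization_bit in (None, 8, 4), "We only accept 4-bit or 8-bit quantization."

args = ToyModelArguments(adapter_name_or_path="saves/lora-a, saves/lora-b", quantization_bit=4)
print(args.adapter_name_or_path)  # ['saves/lora-a', 'saves/lora-b']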
- if self.export_quantization_bit is not None and self.export_quantization_dataset is None: + if ( + self.export_quantization_bit is not None + and self.export_quantization_dataset is None + ): raise ValueError("Quantization dataset is necessary for exporting.") def to_dict(self) -> Dict[str, Any]: diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py index bcefee92..e65798b7 100644 --- a/src/llmtuner/model/adapter.py +++ b/src/llmtuner/model/adapter.py @@ -11,7 +11,7 @@ from .utils.unsloth import get_unsloth_peft_model, load_unsloth_peft_model if TYPE_CHECKING: - from transformers import PretrainedConfig, PreTrainedModel, AutoModelForVision2Seq + from transformers import PretrainedConfig, PreTrainedModel from ..hparams import FinetuningArguments, ModelArguments @@ -21,11 +21,11 @@ logger = get_logger(__name__) def init_adapter( config: "PretrainedConfig", - model: Union["PreTrainedModel","AutoModelForVision2Seq"], + model: Union["PreTrainedModel"], model_args: "ModelArguments", finetuning_args: "FinetuningArguments", is_trainable: bool, -) -> Union["PreTrainedModel","AutoModelForVision2Seq"]: +) -> Union["PreTrainedModel"]: r""" Initializes the adapters. @@ -38,7 +38,9 @@ def init_adapter( logger.info("Adapter is not found at evaluation, load the base model.") return model - if finetuning_args.finetuning_type != "lora" and getattr(model, "quantization_method", None): + if finetuning_args.finetuning_type != "lora" and getattr( + model, "quantization_method", None + ): raise ValueError("You can only use lora for quantized models.") if finetuning_args.finetuning_type == "full" and is_trainable: @@ -49,9 +51,9 @@ def init_adapter( if finetuning_args.finetuning_type == "freeze" and is_trainable: logger.info("Fine-tuning method: Freeze") num_layers = ( - getattr(model.config, "num_hidden_layers", None) - or getattr(model.config, "num_layers", None) - or getattr(model.config, "n_layer", None) + getattr(model.config, "num_hidden_layers", None) + or getattr(model.config, "num_layers", None) + or getattr(model.config, "n_layer", None) ) if not num_layers: raise ValueError("Current model does not support freeze tuning.") @@ -66,8 +68,12 @@ def init_adapter( stride = num_layers // finetuning_args.num_layer_trainable trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride) - elif finetuning_args.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0 - trainable_layer_ids = range(num_layers - finetuning_args.num_layer_trainable, num_layers) + elif ( + finetuning_args.num_layer_trainable > 0 + ): # fine-tuning the last n layers if num_layer_trainable > 0 + trainable_layer_ids = range( + num_layers - finetuning_args.num_layer_trainable, num_layers + ) else: # fine-tuning the first n layers if num_layer_trainable < 0 trainable_layer_ids = range(-finetuning_args.num_layer_trainable) @@ -82,11 +88,15 @@ def init_adapter( for module_name in finetuning_args.name_module_trainable: if module_name not in freeze_modules: raise ValueError( - "Module {} is not found, please choose from {}".format(module_name, ", ".join(freeze_modules)) + "Module {} is not found, please choose from {}".format( + module_name, ", ".join(freeze_modules) + ) ) for idx in trainable_layer_ids: - trainable_layers.append(".{:d}.{}".format(idx, module_name if module_name != "all" else "")) + trainable_layers.append( + ".{:d}.{}".format(idx, module_name if module_name != "all" else "") + ) for name, param in model.named_parameters(): if any(trainable_layer in name for 
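The freeze-tuning branch above decides which transformer layers stay trainable. A small reproduction of the three selection rules (even spread for LLaMA Pro, last n layers for a positive count, first |n| layers for a negative count); the layer counts are just example numbers:

def select_trainable_layers(num_layers, num_layer_trainable, use_llama_pro=False):
    if use_llama_pro:
        stride = num_layers // num_layer_trainable
        return list(range(stride - 1, num_layers + stride - 1, stride))
    if num_layer_trainable > 0:  # fine-tune the last n layers
        return list(range(num_layers - num_layer_trainable, num_layers))
    return list(range(-num_layer_trainable))  # fine-tune the first |n| layers

print(select_trainable_layers(32, 4, use_llama_pro=True))  # [7, 15, 23, 31]
print(select_trainable_layers(32, 2))                      # [30, 31]
print(select_trainable_layers(32, -2))                     # [0, 1]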
trainable_layer in trainable_layers): @@ -95,27 +105,43 @@ def init_adapter( else: param.requires_grad_(False) - logger.info("Set trainable layers: {}".format(",".join(map(str, trainable_layer_ids)))) + logger.info( + "Set trainable layers: {}".format(",".join(map(str, trainable_layer_ids))) + ) if finetuning_args.finetuning_type == "lora": - logger.info("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA")) + logger.info( + "Fine-tuning method: {}".format( + "DoRA" if finetuning_args.use_dora else "LoRA" + ) + ) adapter_to_resume = None if model_args.adapter_name_or_path is not None: is_mergeable = True - if getattr(model, "quantization_method", None): # merge lora in quantized model is unstable - assert len(model_args.adapter_name_or_path) == 1, "Quantized model only accepts a single adapter." + if getattr( + model, "quantization_method", None + ): # merge lora in quantized model is unstable + assert ( + len(model_args.adapter_name_or_path) == 1 + ), "Quantized model only accepts a single adapter." is_mergeable = False if is_deepspeed_zero3_enabled(): - assert len(model_args.adapter_name_or_path) == 1, "Cannot use multiple adapters in DeepSpeed ZeRO-3." + assert ( + len(model_args.adapter_name_or_path) == 1 + ), "Cannot use multiple adapters in DeepSpeed ZeRO-3." is_mergeable = False if model_args.use_unsloth: - assert len(model_args.adapter_name_or_path) == 1, "Unsloth model only accepts a single adapter." + assert ( + len(model_args.adapter_name_or_path) == 1 + ), "Unsloth model only accepts a single adapter." is_mergeable = False - if (is_trainable and not finetuning_args.create_new_adapter) or (not is_mergeable): + if (is_trainable and not finetuning_args.create_new_adapter) or ( + not is_mergeable + ): adapter_to_merge = model_args.adapter_name_or_path[:-1] adapter_to_resume = model_args.adapter_name_or_path[-1] else: @@ -132,7 +158,9 @@ def init_adapter( if adapter_to_resume is not None: # resume lora training if model_args.use_unsloth: - model = load_unsloth_peft_model(config, model_args, is_trainable=is_trainable) + model = load_unsloth_peft_model( + config, model_args, is_trainable=is_trainable + ) else: model = PeftModel.from_pretrained( model, @@ -141,19 +169,27 @@ def init_adapter( offload_folder=model_args.offload_folder, ) - if is_trainable and adapter_to_resume is None: # create new lora weights while training - if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all": + if ( + is_trainable and adapter_to_resume is None + ): # create new lora weights while training + if ( + len(finetuning_args.lora_target) == 1 + and finetuning_args.lora_target[0] == "all" + ): target_modules = find_all_linear_modules(model) else: target_modules = finetuning_args.lora_target if finetuning_args.use_llama_pro: - target_modules = find_expanded_modules(model, target_modules, finetuning_args.num_layer_trainable) + target_modules = find_expanded_modules( + model, target_modules, finetuning_args.num_layer_trainable + ) if ( - finetuning_args.use_dora - and getattr(model, "quantization_method", None) is not None - and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES + finetuning_args.use_dora + and getattr(model, "quantization_method", None) is not None + and getattr(model, "quantization_method", None) + != QuantizationMethod.BITS_AND_BYTES ): raise ValueError("DoRA is not compatible with PTQ-quantized models.") @@ -166,7 +202,11 @@ def init_adapter( module_names.add(name.split(".")[-1]) 
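When several LoRA adapters are passed, the logic above merges all but the last into the base weights and resumes training from the last one, unless merging is unsafe (quantized model, DeepSpeed ZeRO-3, or unsloth). A compact restatement of that decision with a hypothetical helper name:

def split_adapters(adapter_paths, is_trainable, create_new_adapter, is_mergeable):
    # Returns (adapters_to_merge, adapter_to_resume).
    if (is_trainable and not create_new_adapter) or (not is_mergeable):
        return adapter_paths[:-1], adapter_paths[-1]
    return adapter_paths, None

print(split_adapters(["lora-a", "lora-b"], is_trainable=True,
                     create_new_adapter=False, is_mergeable=True))
# (['lora-a'], 'lora-b')
print(split_adapters(["lora-a", "lora-b"], is_trainable=False,
                     create_new_adapter=False, is_mergeable=True))
# (['lora-a', 'lora-b'], None)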
finetuning_args.additional_target = module_names - logger.warning("Vocab has been resized, add {} to trainable params.".format(",".join(module_names))) + logger.warning( + "Vocab has been resized, add {} to trainable params.".format( + ",".join(module_names) + ) + ) peft_kwargs = { "r": finetuning_args.lora_rank, @@ -193,6 +233,10 @@ def init_adapter( param.data = param.data.to(torch.float32) if model_args.adapter_name_or_path is not None: - logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path))) + logger.info( + "Loaded adapter(s): {}".format( + ",".join(model_args.adapter_name_or_path) + ) + ) - return model \ No newline at end of file + return model diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index 3712a592..18b0cf79 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -1,6 +1,12 @@ from typing import TYPE_CHECKING, Any, Dict, Union -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AutoProcessor, AutoModelForVision2Seq +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + AutoProcessor, + AutoModelForVision2Seq, +) from trl import AutoModelForCausalLMWithValueHead from ..extras.logging import get_logger @@ -62,10 +68,14 @@ def load_tokenizer(model_args: "ModelArguments") -> "PreTrainedTokenizer": dict(additional_special_tokens=model_args.new_special_tokens), replace_additional_special_tokens=False, ) - logger.info("Add {} to special tokens.".format(",".join(model_args.new_special_tokens))) + logger.info( + "Add {} to special tokens.".format(",".join(model_args.new_special_tokens)) + ) if num_added_tokens > 0 and not model_args.resize_vocab: model_args.resize_vocab = True - logger.warning("New tokens have been added, changed `resize_vocab` to True.") + logger.warning( + "New tokens have been added, changed `resize_vocab` to True." + ) patch_tokenizer(tokenizer) return tokenizer @@ -111,7 +121,7 @@ def load_model( finetuning_args: "FinetuningArguments", is_trainable: bool = False, add_valuehead: bool = False, -) -> Union["PreTrainedModel", "AutoModelForVision2Seq"]: +) -> Union["PreTrainedModel"]: r""" Loads pretrained model. 
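load_model above reports the trainable and total parameter counts via count_parameters. A minimal equivalent applied to a toy module, assuming torch is available (it is a hard dependency of this project):

import torch.nn as nn

def count_parameters(model):
    trainable, total = 0, 0
    for param in model.parameters():
        num = param.numel()
        total += num
        if param.requires_grad:
            trainable += num
    return trainable, total

model = nn.Linear(4, 2)
model.bias.requires_grad_(False)
trainable, total = count_parameters(model)
print("trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format(
    trainable, total, 100 * trainable / total))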
""" @@ -170,8 +180,10 @@ def load_model( trainable_params, all_param = count_parameters(model) if is_trainable: - param_stats = "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( - trainable_params, all_param, 100 * trainable_params / all_param + param_stats = ( + "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( + trainable_params, all_param, 100 * trainable_params / all_param + ) ) else: param_stats = "all params: {:d}".format(all_param) @@ -185,4 +197,4 @@ def load_model( ) ) - return model \ No newline at end of file + return model diff --git a/src/llmtuner/train/sftmm/collator.py b/src/llmtuner/train/sftmm/collator.py index 95dbd939..2931dd9c 100644 --- a/src/llmtuner/train/sftmm/collator.py +++ b/src/llmtuner/train/sftmm/collator.py @@ -19,7 +19,9 @@ class DataCollatorForVis2Seq: texts.append(text) images.append(example["images"][0]) - batch = self.processor(text=texts, images=images, return_tensors="pt", padding=True) + batch = self.processor( + text=texts, images=images, return_tensors="pt", padding=True + ) labels = batch["input_ids"].clone() if self.processor.tokenizer.pad_token_id is not None: @@ -27,3 +29,14 @@ class DataCollatorForVis2Seq: batch["labels"] = labels return batch + + +@dataclass +class DataCollatorForMLLM: + processor: AutoProcessor + + def __call__(self, examples): + print(examples[0].keys()) + print(examples[0]["input_ids"]) + batch = {} + return batch diff --git a/src/llmtuner/train/sftmm/workflow.py b/src/llmtuner/train/sftmm/workflow.py index 7afd8f6f..3849a563 100644 --- a/src/llmtuner/train/sftmm/workflow.py +++ b/src/llmtuner/train/sftmm/workflow.py @@ -1,47 +1,66 @@ # Inspired by: https://github.com/huggingface/transformers/blob/v4.34.1/examples/pytorch/summarization/run_summarization.py import os from typing import TYPE_CHECKING, List, Optional -from ...data import split_dataset, get_mm_dataset +from ...data import get_dataset from ...extras.misc import get_logits_processor from ...extras.ploting import plot_loss -from ...model import load_tokenizer, load_processor, load_model +from ...model import load_processor, load_model from ..utils import create_modelcard_and_push from .metric import ComputeMetrics from .trainer import CustomSeq2SeqTrainer -from .collator import DataCollatorForVis2Seq +from transformers import DataCollatorForSeq2Seq +from ...extras.constants import IGNORE_INDEX if TYPE_CHECKING: from transformers import Seq2SeqTrainingArguments, TrainerCallback - from ...hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments + from ...hparams import ( + DataArguments, + FinetuningArguments, + GeneratingArguments, + ModelArguments, + ) def run_sft_mm( - model_args: "ModelArguments", - data_args: "DataArguments", - training_args: "Seq2SeqTrainingArguments", - finetuning_args: "FinetuningArguments", - generating_args: "GeneratingArguments", - callbacks: Optional[List["TrainerCallback"]] = None, + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + callbacks: Optional[List["TrainerCallback"]] = None, ): processor = load_processor(model_args) - tokenizer = load_tokenizer(model_args) - CHAT_TEMPLATE = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the user's questions. {% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" - tokenizer.chat_template = CHAT_TEMPLATE - processor.tokenizer = tokenizer - model = load_model(processor.tokenizer, model_args, finetuning_args, training_args.do_train) - dataset = get_mm_dataset(processor, model_args, data_args, training_args, stage="sft") + tokenizer = processor.tokenizer + dataset = get_dataset( + tokenizer, model_args, data_args, training_args, "sft", processor + ) + model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) if getattr(model, "is_quantized", False) and not training_args.do_train: - setattr(model, "_hf_peft_config_loaded", True) # hack here: make model compatible with prediction + setattr( + model, "_hf_peft_config_loaded", True + ) # hack here: make model compatible with prediction train_dataset = dataset eval_dataset = dataset - data_collator = DataCollatorForVis2Seq( - processor=processor, + data_collator = DataCollatorForSeq2Seq( + tokenizer=tokenizer, + pad_to_multiple_of=( + 8 if tokenizer.padding_side == "right" else None + ), # for shift short attention + label_pad_token_id=( + IGNORE_INDEX + if data_args.ignore_pad_token_for_loss + else tokenizer.pad_token_id + ), ) # Override the decoding parameters of Seq2SeqTrainer - training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len - training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams + training_args.generation_max_length = ( + training_args.generation_max_length or data_args.cutoff_len + ) + training_args.generation_num_beams = ( + data_args.eval_num_beams or training_args.generation_num_beams + ) training_args.remove_unused_columns = False # Initialize our Trainer @@ -52,19 +71,26 @@ def run_sft_mm( tokenizer=tokenizer, data_collator=data_collator, callbacks=callbacks, - compute_metrics=ComputeMetrics(tokenizer) if training_args.predict_with_generate else None, + compute_metrics=( + ComputeMetrics(tokenizer) if training_args.predict_with_generate else None + ), train_dataset=train_dataset, eval_dataset=eval_dataset, ) + # Keyword arguments for `model.generate` gen_kwargs = generating_args.to_dict() - gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids + gen_kwargs["eos_token_id"] = [ + tokenizer.eos_token_id + ] + tokenizer.additional_special_tokens_ids gen_kwargs["pad_token_id"] = tokenizer.pad_token_id gen_kwargs["logits_processor"] = get_logits_processor() # Training if training_args.do_train: - train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) + train_result = trainer.train( + resume_from_checkpoint=training_args.resume_from_checkpoint + ) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) @@ -75,19 +101,27 @@ def run_sft_mm( # Evaluation if training_args.do_eval: metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs) - if training_args.predict_with_generate: # eval_loss will be wrong if predict_with_generate is enabled + if ( + 
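The generation setup above extends generating_args with every stop token the tokenizer knows about before calling model.generate. A small sketch of that assembly with a mocked tokenizer; SimpleNamespace stands in for the real object:

from types import SimpleNamespace

def build_generation_kwargs(generating_args, tokenizer):
    gen_kwargs = dict(generating_args)
    # Stop on the regular EOS token or on any additional special token.
    gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + list(tokenizer.additional_special_tokens_ids)
    gen_kwargs["pad_token_id"] = tokenizer.pad_token_id
    return gen_kwargs

toy_tokenizer = SimpleNamespace(eos_token_id=2, pad_token_id=0, additional_special_tokens_ids=[32001])
print(build_generation_kwargs({"max_new_tokens": 100, "do_sample": False}, toy_tokenizer))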
training_args.predict_with_generate + ): # eval_loss will be wrong if predict_with_generate is enabled metrics.pop("eval_loss", None) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: - predict_results = trainer.predict(dataset, metric_key_prefix="predict", **gen_kwargs) - if training_args.predict_with_generate: # predict_loss will be wrong if predict_with_generate is enabled + predict_results = trainer.predict( + dataset, metric_key_prefix="predict", **gen_kwargs + ) + if ( + training_args.predict_with_generate + ): # predict_loss will be wrong if predict_with_generate is enabled predict_results.metrics.pop("predict_loss", None) trainer.log_metrics("predict", predict_results.metrics) trainer.save_metrics("predict", predict_results.metrics) trainer.save_predictions(predict_results) # Create model card - create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args) + create_modelcard_and_push( + trainer, model_args, data_args, training_args, finetuning_args + ) From 42c90c8183a49cadb2c2abcc58f6ea27d325231d Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 19:58:47 +0800 Subject: [PATCH 09/34] merge data part to the text stream --- data/dataset_info.json | 3 --- data/mllm_example_dataset/README.md | 25 ------------------ data/mllm_example_dataset/data/test-0.parquet | Bin 4580 -> 0 bytes .../mllm_example_dataset/data/train-0.parquet | Bin 4580 -> 0 bytes scripts/test_mllm.py | 24 +++++++++++------ src/llmtuner/data/template.py | 4 +-- 6 files changed, 18 insertions(+), 38 deletions(-) delete mode 100644 data/mllm_example_dataset/README.md delete mode 100644 data/mllm_example_dataset/data/test-0.parquet delete mode 100644 data/mllm_example_dataset/data/train-0.parquet diff --git a/data/dataset_info.json b/data/dataset_info.json index f9adf108..4958d8c7 100644 --- a/data/dataset_info.json +++ b/data/dataset_info.json @@ -414,9 +414,6 @@ }, "folder": "python" }, - "llava_instruct": { - "hf_hub_url": "HuggingFaceH4/llava-instruct-mix-vsft" - }, "mllm_instruct_example": { "file_name": "llava_instruct_example.json", "formatting": "llava", diff --git a/data/mllm_example_dataset/README.md b/data/mllm_example_dataset/README.md deleted file mode 100644 index d5c8c0e6..00000000 --- a/data/mllm_example_dataset/README.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -dataset_info: - features: - - name: messages - list: - - name: content - list: - - name: index - dtype: int64 - - name: text - dtype: string - - name: type - dtype: string - - name: role - dtype: string - - name: images - sequence: image -configs: -- config_name: default - data_files: - - split: train - path: data/train-* - - split: test - path: data/test-* ---- \ No newline at end of file diff --git a/data/mllm_example_dataset/data/test-0.parquet b/data/mllm_example_dataset/data/test-0.parquet deleted file mode 100644 index 42c20b192497168523c3d39447cdae4495085b84..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4580 zcmdTIO>84qc_wRnlaN&LHsc*{qws2Fr_nZRJ5HP?tA%tXA^uq>&A)#Tg6ElM&t%6l zW@a46Sr)C3s!AL=RTT%s38AX0RKx*L#Q`A>sHgT&sS*OM5OAnaFCYOb-+N;_KV-Ko zDiw^9H}8G_|8J5_d3#m}2xG!{K^PFYD;zgC!F3;j6XHT@MwkS2NC-`cgFVd2E+S4} z00p|Mr68=q5`I~@OgC>ge)D7d89u~G5BUip5KVKTvFOoYgSalP6E|QQu6LQ3q(Ln2 zvT9o%yw3oGhNY1sVIVRYd6$oTz)OqL;FFjXodG{NVSs~W3|<@u=Z8bWuLlc)$UOfj z@aNnpz>B(#utSiilg{!C0Cr*X902ZMiy&-iDB}?C_%22@$8I16cZg%5^_FF*dVHJ- zz#a-K6G*cStG+xvnhjRwRdBBAU=JY3ws>P6xwfhj2h|K>YQv*1Yle$-vMhmsEP2jF 
zySm-LF32@a{>IujI8F>Ma-E^YaJf)-?3Sf1CxBETDsH(1>HNlsYZ}_kMpPvX5(fPocD;ve`lr& zf>9H9OdD%_PE-v{v(5kZ4?8-sj&-|*W*1Pya9zW;kYS;G3*wwgrcyJIgO-b`1T@gN zK}H~4j_aY7(Vyz7acGzZwvb(ejM%Lvnxouq6RX!(#^;h~9e4QkvAHu4)iVC5Rj~iDS@$#AeTYHxA`usg7>WF8Sb8`j{q@qsr)&I$Efy1`0_{8)E#Vgf z;2<@Gy2y$TqA3YCPDPWOW|oRUh6b{yv;BC`rk%XZ2bDqT!z=$`6go}9qVNMg@+L1m zxFWqBo>(}i^g=&w2=SgD!Y1_ty3QGbH-@@(J#2a43phIi)5M>bR4gSDhx#Ny9)9r> zticbTqTYxBKKR37>D!UMH`$9vV!*LYYPcBr5g+*(FTE;A?~F6U&uh5Qc$j+yFpnmI z&cAEI zCI&>0-9F_=VBpdeC;jr7$NLg!VoztTyg3m0)`0Z-HR%^o8rw4p9;x#pm!p3lOLJn# zRdv$9LV!yTi2c06JsT?A22;}kk=<}`83Dd``T5knEAW$uLvV{-AAoM5mm?>Pz@GKb zb*@0~@h$+0{tSQ?Qx^c5yZQYjRRLd`pZTt28o92ZNGLiHHUD>R_y4a!`B@LN|MJNB z6z9ih-@cKbKOG54gOS&+z{gy4M*N)Ks@LJ(u3?pkZw=gw8CK1X-9q)uVQOO&BDdBRcWjpRJP<}%1b)=X0=f{%pKTu*f%Q0wWL17zNC-8)&Nj5N{w3etSwT_2 z-=?Jqabcv4{3O!y7dR0$u><4OyQwytH?iZ`ZFEQ+_UH0!I-ZQDq9%Opo%`Wl8HT_5 I;r~1T1e%nlz5oCK diff --git a/data/mllm_example_dataset/data/train-0.parquet b/data/mllm_example_dataset/data/train-0.parquet deleted file mode 100644 index 42c20b192497168523c3d39447cdae4495085b84..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4580 zcmdTIO>84qc_wRnlaN&LHsc*{qws2Fr_nZRJ5HP?tA%tXA^uq>&A)#Tg6ElM&t%6l zW@a46Sr)C3s!AL=RTT%s38AX0RKx*L#Q`A>sHgT&sS*OM5OAnaFCYOb-+N;_KV-Ko zDiw^9H}8G_|8J5_d3#m}2xG!{K^PFYD;zgC!F3;j6XHT@MwkS2NC-`cgFVd2E+S4} z00p|Mr68=q5`I~@OgC>ge)D7d89u~G5BUip5KVKTvFOoYgSalP6E|QQu6LQ3q(Ln2 zvT9o%yw3oGhNY1sVIVRYd6$oTz)OqL;FFjXodG{NVSs~W3|<@u=Z8bWuLlc)$UOfj z@aNnpz>B(#utSiilg{!C0Cr*X902ZMiy&-iDB}?C_%22@$8I16cZg%5^_FF*dVHJ- zz#a-K6G*cStG+xvnhjRwRdBBAU=JY3ws>P6xwfhj2h|K>YQv*1Yle$-vMhmsEP2jF zySm-LF32@a{>IujI8F>Ma-E^YaJf)-?3Sf1CxBETDsH(1>HNlsYZ}_kMpPvX5(fPocD;ve`lr& zf>9H9OdD%_PE-v{v(5kZ4?8-sj&-|*W*1Pya9zW;kYS;G3*wwgrcyJIgO-b`1T@gN zK}H~4j_aY7(Vyz7acGzZwvb(ejM%Lvnxouq6RX!(#^;h~9e4QkvAHu4)iVC5Rj~iDS@$#AeTYHxA`usg7>WF8Sb8`j{q@qsr)&I$Efy1`0_{8)E#Vgf z;2<@Gy2y$TqA3YCPDPWOW|oRUh6b{yv;BC`rk%XZ2bDqT!z=$`6go}9qVNMg@+L1m zxFWqBo>(}i^g=&w2=SgD!Y1_ty3QGbH-@@(J#2a43phIi)5M>bR4gSDhx#Ny9)9r> zticbTqTYxBKKR37>D!UMH`$9vV!*LYYPcBr5g+*(FTE;A?~F6U&uh5Qc$j+yFpnmI z&cAEI zCI&>0-9F_=VBpdeC;jr7$NLg!VoztTyg3m0)`0Z-HR%^o8rw4p9;x#pm!p3lOLJn# zRdv$9LV!yTi2c06JsT?A22;}kk=<}`83Dd``T5knEAW$uLvV{-AAoM5mm?>Pz@GKb zb*@0~@h$+0{tSQ?Qx^c5yZQYjRRLd`pZTt28o92ZNGLiHHUD>R_y4a!`B@LN|MJNB z6z9ih-@cKbKOG54gOS&+z{gy4M*N)Ks@LJ(u3?pkZw=gw8CK1X-9q)uVQOO&BDdBRcWjpRJP<}%1b)=X0=f{%pKTu*f%Q0wWL17zNC-8)&Nj5N{w3etSwT_2 z-=?Jqabcv4{3O!y7dR0$u><4OyQwytH?iZ`ZFEQ+_UH0!I-ZQDq9%Opo%`Wl8HT_5 I;r~1T1e%nlz5oCK diff --git a/scripts/test_mllm.py b/scripts/test_mllm.py index 94d8670b..b8fe3e0f 100644 --- a/scripts/test_mllm.py +++ b/scripts/test_mllm.py @@ -6,22 +6,23 @@ from datasets import load_dataset from peft import PeftModel from transformers import AutoTokenizer, AutoModelForVision2Seq, AutoProcessor import shutil +from PIL import Image """usage python3 scripts/test_mllm.py \ --base_model_path llava-hf/llava-1.5-7b-hf \ --lora_model_path saves/llava-1.5-7b/lora/sft \ --model_path saves/llava-1.5-7b/lora/merged \ ---dataset_name data/mllm_example_dataset \ +--dataset_name data/llava_instruct_example.json \ --do_merge 1 """ def get_processor(model_path): - CHAT_TEMPLATE = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. 
{% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" + processor = AutoProcessor.from_pretrained(model_path) + CHAT_TEMPLATE = """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. {% for message in messages %}{% if message['role'] == 'user' %}USER: {{ message['content'] }} ASSISTANT: {% else %}{{ message['content'] }}{% endif %} {% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}""" tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True) tokenizer.chat_template = CHAT_TEMPLATE - processor = AutoProcessor.from_pretrained(model_path) processor.tokenizer = tokenizer return processor @@ -69,7 +70,7 @@ def main( device_map="cuda", ) processor = get_processor(model_path) - raw_datasets = load_dataset(dataset_name) + raw_datasets = load_dataset("json", data_files=dataset_name) train_dataset = raw_datasets["train"] examples = train_dataset.select(range(3)) texts = [] @@ -80,11 +81,18 @@ def main( messages, tokenize=False, add_generation_prompt=False ) texts.append(text) - images.append(example["images"][0]) - batch = processor(texts, images, return_tensors="pt", padding=True).to("cuda") + images.append(Image.open(example["images"][0])) + batch = processor(text=texts, images=images, return_tensors="pt", padding=True).to( + "cuda" + ) output = model.generate(**batch, max_new_tokens=100) - res = processor.batch_decode(output, skip_special_tokens=True) - print(res) + res_list = processor.batch_decode(output, skip_special_tokens=True) + for i, prompt in enumerate(texts): + res = res_list[i] + print(f"#{i}") + print(f"prompt:{prompt}") + print(f"response:{res[len(prompt):].strip()}") + print() if __name__ == "__main__": diff --git a/src/llmtuner/data/template.py b/src/llmtuner/data/template.py index 311660aa..e6cdadd6 100644 --- a/src/llmtuner/data/template.py +++ b/src/llmtuner/data/template.py @@ -1012,8 +1012,8 @@ _register_template( _register_template( name="llava", - format_user=StringFormatter(slots=["USER: {{content}} "]), - format_assistant=StringFormatter(slots=["ASSISTANT: {{content}}"]), + format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT: "]), + format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}]), default_system=( "A chat between a curious user and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the user's questions." 
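The print loop in the test script above assumes each decoded string begins with (roughly) the prompt text, so the newly generated response is recovered by slicing off the prompt prefix. The same idea in isolation, with made-up strings:

def strip_prompts(prompts, decoded_outputs):
    # Keep only the text generated after each prompt.
    return [output[len(prompt):].strip() for prompt, output in zip(prompts, decoded_outputs)]

print(strip_prompts(
    ["USER: What is in the image? ASSISTANT: "],
    ["USER: What is in the image? ASSISTANT: A white pickup truck."],
))  # ['A white pickup truck.']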
From eefcd105c12dd916a7b80825fdb1e979f87501d6 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 20:09:43 +0800 Subject: [PATCH 10/34] rm some --- src/llmtuner/train/sftmm/collator.py | 42 ---------------------------- 1 file changed, 42 deletions(-) delete mode 100644 src/llmtuner/train/sftmm/collator.py diff --git a/src/llmtuner/train/sftmm/collator.py b/src/llmtuner/train/sftmm/collator.py deleted file mode 100644 index 2931dd9c..00000000 --- a/src/llmtuner/train/sftmm/collator.py +++ /dev/null @@ -1,42 +0,0 @@ -from dataclasses import dataclass -from transformers import AutoProcessor - - -@dataclass -class DataCollatorForVis2Seq: - processor: AutoProcessor - - def __call__(self, examples): - texts = [] - images = [] - for example in examples: - if len(example["images"]) > 1: - raise ValueError("This collator only supports one image per example") - messages = example["messages"] - text = self.processor.tokenizer.apply_chat_template( - messages, tokenize=False, add_generation_prompt=False - ) - texts.append(text) - images.append(example["images"][0]) - - batch = self.processor( - text=texts, images=images, return_tensors="pt", padding=True - ) - - labels = batch["input_ids"].clone() - if self.processor.tokenizer.pad_token_id is not None: - labels[labels == self.processor.tokenizer.pad_token_id] = -100 - batch["labels"] = labels - - return batch - - -@dataclass -class DataCollatorForMLLM: - processor: AutoProcessor - - def __call__(self, examples): - print(examples[0].keys()) - print(examples[0]["input_ids"]) - batch = {} - return batch From 94ad744941b5d305892c38169a8478d0fb1a3019 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 21:08:32 +0800 Subject: [PATCH 11/34] add some --- src/llmtuner/model/__init__.py | 3 +- src/llmtuner/model/loader.py | 50 +++++------ src/llmtuner/train/sft/workflow.py | 72 +++++++++++---- src/llmtuner/train/sftmm/__init__.py | 3 - src/llmtuner/train/sftmm/metric.py | 61 ------------- src/llmtuner/train/sftmm/trainer.py | 44 ---------- src/llmtuner/train/sftmm/workflow.py | 127 --------------------------- src/llmtuner/train/tuner.py | 3 - 8 files changed, 80 insertions(+), 283 deletions(-) delete mode 100644 src/llmtuner/train/sftmm/__init__.py delete mode 100644 src/llmtuner/train/sftmm/metric.py delete mode 100644 src/llmtuner/train/sftmm/trainer.py delete mode 100644 src/llmtuner/train/sftmm/workflow.py diff --git a/src/llmtuner/model/__init__.py b/src/llmtuner/model/__init__.py index f6be60d8..db81e1dc 100644 --- a/src/llmtuner/model/__init__.py +++ b/src/llmtuner/model/__init__.py @@ -1,11 +1,10 @@ -from .loader import load_config, load_model, load_tokenizer, load_processor +from .loader import load_config, load_model, load_tokenizer from .utils.misc import find_all_linear_modules, load_valuehead_params __all__ = [ "load_config", "load_model", "load_tokenizer", - "load_processor", "load_valuehead_params", "find_all_linear_modules", ] diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index 18b0cf79..99ad9adc 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -40,7 +40,9 @@ def _get_init_kwargs(model_args: "ModelArguments") -> Dict[str, Any]: } -def load_tokenizer(model_args: "ModelArguments") -> "PreTrainedTokenizer": +def load_tokenizer( + model_args: "ModelArguments", +) -> Dict[str, Union["PreTrainedTokenizer", "AutoProcesser"]]: r""" Loads pretrained tokenizer. 
@@ -78,33 +80,25 @@ def load_tokenizer(model_args: "ModelArguments") -> "PreTrainedTokenizer": ) patch_tokenizer(tokenizer) - return tokenizer - - -def load_processor(model_args: "ModelArguments") -> "AutoProcessor": - r""" - Loads processor. Must before load_model. - - Note: including inplace operation of model_args. - """ - init_kwargs = _get_init_kwargs(model_args) - try: - processor = AutoProcessor.from_pretrained( - model_args.model_name_or_path, - use_fast=model_args.use_fast_tokenizer, - split_special_tokens=model_args.split_special_tokens, - padding_side="right", - **init_kwargs, - ) - except Exception: # try the fast one - processor = AutoProcessor.from_pretrained( - model_args.model_name_or_path, - use_fast=True, - padding_side="right", - **init_kwargs, - ) - - return processor + tokenizer_modules = {"tokenizer": tokenizer, "processor": None} + if model_args.use_mllm: + try: + processor = AutoProcessor.from_pretrained( + model_args.model_name_or_path, + use_fast=model_args.use_fast_tokenizer, + split_special_tokens=model_args.split_special_tokens, + padding_side="right", + **init_kwargs, + ) + except Exception: # try the fast one + processor = AutoProcessor.from_pretrained( + model_args.model_name_or_path, + use_fast=True, + padding_side="right", + **init_kwargs, + ) + tokenizer_modules["processor"] = processor + return tokenizer_modules def load_config(model_args: "ModelArguments") -> "PretrainedConfig": diff --git a/src/llmtuner/train/sft/workflow.py b/src/llmtuner/train/sft/workflow.py index 9ab78850..6f887810 100644 --- a/src/llmtuner/train/sft/workflow.py +++ b/src/llmtuner/train/sft/workflow.py @@ -17,7 +17,12 @@ from .trainer import CustomSeq2SeqTrainer if TYPE_CHECKING: from transformers import Seq2SeqTrainingArguments, TrainerCallback - from ...hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments + from ...hparams import ( + DataArguments, + FinetuningArguments, + GeneratingArguments, + ModelArguments, + ) def run_sft( @@ -28,25 +33,48 @@ def run_sft( generating_args: "GeneratingArguments", callbacks: Optional[List["TrainerCallback"]] = None, ): - tokenizer = load_tokenizer(model_args) - dataset = get_dataset(tokenizer, model_args, data_args, training_args, stage="sft") + tokenizer_modules = load_tokenizer(model_args) + tokenizer = tokenizer_modules["tokenizer"] + processor = tokenizer_modules["processor"] + dataset = get_dataset( + tokenizer, + model_args, + data_args, + training_args, + stage="sft", + processor=processor, + ) model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) if training_args.predict_with_generate: tokenizer.padding_side = "left" # use left-padding in generation if getattr(model, "is_quantized", False) and not training_args.do_train: - setattr(model, "_hf_peft_config_loaded", True) # hack here: make model compatible with prediction + setattr( + model, "_hf_peft_config_loaded", True + ) # hack here: make model compatible with prediction data_collator = DataCollatorForSeq2Seq( tokenizer=tokenizer, - pad_to_multiple_of=8 if tokenizer.padding_side == "right" else None, # for shift short attention - label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id, + pad_to_multiple_of=( + 8 if tokenizer.padding_side == "right" else None + ), # for shift short attention + label_pad_token_id=( + IGNORE_INDEX + if data_args.ignore_pad_token_for_loss + else tokenizer.pad_token_id + ), ) # Override the decoding parameters of Seq2SeqTrainer - 
training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len - training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams + training_args.generation_max_length = ( + training_args.generation_max_length or data_args.cutoff_len + ) + training_args.generation_num_beams = ( + data_args.eval_num_beams or training_args.generation_num_beams + ) + if model_args.use_mllm: + training_args.remove_unused_columns = False # Initialize our Trainer trainer = CustomSeq2SeqTrainer( @@ -56,19 +84,25 @@ def run_sft( tokenizer=tokenizer, data_collator=data_collator, callbacks=callbacks, - compute_metrics=ComputeMetrics(tokenizer) if training_args.predict_with_generate else None, + compute_metrics=( + ComputeMetrics(tokenizer) if training_args.predict_with_generate else None + ), **split_dataset(dataset, data_args, training_args), ) # Keyword arguments for `model.generate` gen_kwargs = generating_args.to_dict() - gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids + gen_kwargs["eos_token_id"] = [ + tokenizer.eos_token_id + ] + tokenizer.additional_special_tokens_ids gen_kwargs["pad_token_id"] = tokenizer.pad_token_id gen_kwargs["logits_processor"] = get_logits_processor() # Training if training_args.do_train: - train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) + train_result = trainer.train( + resume_from_checkpoint=training_args.resume_from_checkpoint + ) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) @@ -79,19 +113,27 @@ def run_sft( # Evaluation if training_args.do_eval: metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs) - if training_args.predict_with_generate: # eval_loss will be wrong if predict_with_generate is enabled + if ( + training_args.predict_with_generate + ): # eval_loss will be wrong if predict_with_generate is enabled metrics.pop("eval_loss", None) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: - predict_results = trainer.predict(dataset, metric_key_prefix="predict", **gen_kwargs) - if training_args.predict_with_generate: # predict_loss will be wrong if predict_with_generate is enabled + predict_results = trainer.predict( + dataset, metric_key_prefix="predict", **gen_kwargs + ) + if ( + training_args.predict_with_generate + ): # predict_loss will be wrong if predict_with_generate is enabled predict_results.metrics.pop("predict_loss", None) trainer.log_metrics("predict", predict_results.metrics) trainer.save_metrics("predict", predict_results.metrics) trainer.save_predictions(predict_results) # Create model card - create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args) + create_modelcard_and_push( + trainer, model_args, data_args, training_args, finetuning_args + ) diff --git a/src/llmtuner/train/sftmm/__init__.py b/src/llmtuner/train/sftmm/__init__.py deleted file mode 100644 index 3eb8b2e2..00000000 --- a/src/llmtuner/train/sftmm/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .workflow import run_sft_mm - -__all__ = ["run_sft_mm"] diff --git a/src/llmtuner/train/sftmm/metric.py b/src/llmtuner/train/sftmm/metric.py deleted file mode 100644 index d1af4c17..00000000 --- a/src/llmtuner/train/sftmm/metric.py +++ /dev/null @@ -1,61 +0,0 @@ -from dataclasses import dataclass -from typing import TYPE_CHECKING, Dict, Sequence, Tuple, Union - -import 
numpy as np - -from ...extras.constants import IGNORE_INDEX -from ...extras.packages import is_jieba_available, is_nltk_available, is_rouge_available - - -if TYPE_CHECKING: - from transformers.tokenization_utils import PreTrainedTokenizer - -if is_jieba_available(): - import jieba # type: ignore - -if is_nltk_available(): - from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu - -if is_rouge_available(): - from rouge_chinese import Rouge - - -@dataclass -class ComputeMetrics: - r""" - Wraps the tokenizer into metric functions, used in Seq2SeqPeftTrainer. - """ - - tokenizer: "PreTrainedTokenizer" - - def __call__(self, eval_preds: Sequence[Union[np.ndarray, Tuple[np.ndarray]]]) -> Dict[str, float]: - r""" - Uses the model predictions to compute metrics. - """ - preds, labels = eval_preds - score_dict = {"rouge-1": [], "rouge-2": [], "rouge-l": [], "bleu-4": []} - - preds = np.where(preds != IGNORE_INDEX, preds, self.tokenizer.pad_token_id) - labels = np.where(labels != IGNORE_INDEX, labels, self.tokenizer.pad_token_id) - - decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True) - decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True) - - for pred, label in zip(decoded_preds, decoded_labels): - hypothesis = list(jieba.cut(pred)) - reference = list(jieba.cut(label)) - - if len(" ".join(hypothesis).split()) == 0 or len(" ".join(reference).split()) == 0: - result = {"rouge-1": {"f": 0.0}, "rouge-2": {"f": 0.0}, "rouge-l": {"f": 0.0}} - else: - rouge = Rouge() - scores = rouge.get_scores(" ".join(hypothesis), " ".join(reference)) - result = scores[0] - - for k, v in result.items(): - score_dict[k].append(round(v["f"] * 100, 4)) - - bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3) - score_dict["bleu-4"].append(round(bleu_score * 100, 4)) - - return {k: float(np.mean(v)) for k, v in score_dict.items()} diff --git a/src/llmtuner/train/sftmm/trainer.py b/src/llmtuner/train/sftmm/trainer.py deleted file mode 100644 index f094e609..00000000 --- a/src/llmtuner/train/sftmm/trainer.py +++ /dev/null @@ -1,44 +0,0 @@ -import json -import os -from types import MethodType -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union - -import numpy as np -import torch -from transformers import Seq2SeqTrainer, Trainer - -from ...extras.constants import IGNORE_INDEX -from ...extras.logging import get_logger -from ..utils import create_custom_optimzer, create_custom_scheduler - -if TYPE_CHECKING: - from transformers.trainer import PredictionOutput - from peft import PeftModelForCausalLM - from ...hparams import FinetuningArguments - -logger = get_logger(__name__) - - -class CustomSeq2SeqTrainer(Seq2SeqTrainer): - r""" - Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE. 
- """ - - def __init__(self, finetuning_args: "FinetuningArguments", **kwargs) -> None: - super().__init__(**kwargs) - self.finetuning_args = finetuning_args - if finetuning_args.use_badam: - from badam import clip_grad_norm_for_sparse_tensor - - self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_for_sparse_tensor, self.accelerator) - - def create_optimizer(self) -> "torch.optim.Optimizer": - if self.optimizer is None: - self.optimizer = create_custom_optimzer(self.model, self.args, self.finetuning_args) - return super().create_optimizer() - - def create_scheduler( - self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None - ) -> "torch.optim.lr_scheduler.LRScheduler": - create_custom_scheduler(self.args, num_training_steps, optimizer) - return super().create_scheduler(num_training_steps, optimizer) diff --git a/src/llmtuner/train/sftmm/workflow.py b/src/llmtuner/train/sftmm/workflow.py deleted file mode 100644 index 3849a563..00000000 --- a/src/llmtuner/train/sftmm/workflow.py +++ /dev/null @@ -1,127 +0,0 @@ -# Inspired by: https://github.com/huggingface/transformers/blob/v4.34.1/examples/pytorch/summarization/run_summarization.py -import os -from typing import TYPE_CHECKING, List, Optional -from ...data import get_dataset -from ...extras.misc import get_logits_processor -from ...extras.ploting import plot_loss -from ...model import load_processor, load_model -from ..utils import create_modelcard_and_push -from .metric import ComputeMetrics -from .trainer import CustomSeq2SeqTrainer -from transformers import DataCollatorForSeq2Seq -from ...extras.constants import IGNORE_INDEX - -if TYPE_CHECKING: - from transformers import Seq2SeqTrainingArguments, TrainerCallback - - from ...hparams import ( - DataArguments, - FinetuningArguments, - GeneratingArguments, - ModelArguments, - ) - - -def run_sft_mm( - model_args: "ModelArguments", - data_args: "DataArguments", - training_args: "Seq2SeqTrainingArguments", - finetuning_args: "FinetuningArguments", - generating_args: "GeneratingArguments", - callbacks: Optional[List["TrainerCallback"]] = None, -): - processor = load_processor(model_args) - tokenizer = processor.tokenizer - dataset = get_dataset( - tokenizer, model_args, data_args, training_args, "sft", processor - ) - model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) - if getattr(model, "is_quantized", False) and not training_args.do_train: - setattr( - model, "_hf_peft_config_loaded", True - ) # hack here: make model compatible with prediction - train_dataset = dataset - eval_dataset = dataset - data_collator = DataCollatorForSeq2Seq( - tokenizer=tokenizer, - pad_to_multiple_of=( - 8 if tokenizer.padding_side == "right" else None - ), # for shift short attention - label_pad_token_id=( - IGNORE_INDEX - if data_args.ignore_pad_token_for_loss - else tokenizer.pad_token_id - ), - ) - - # Override the decoding parameters of Seq2SeqTrainer - training_args.generation_max_length = ( - training_args.generation_max_length or data_args.cutoff_len - ) - training_args.generation_num_beams = ( - data_args.eval_num_beams or training_args.generation_num_beams - ) - training_args.remove_unused_columns = False - - # Initialize our Trainer - trainer = CustomSeq2SeqTrainer( - model=model, - args=training_args, - finetuning_args=finetuning_args, - tokenizer=tokenizer, - data_collator=data_collator, - callbacks=callbacks, - compute_metrics=( - ComputeMetrics(tokenizer) if training_args.predict_with_generate else None - ), - 
train_dataset=train_dataset, - eval_dataset=eval_dataset, - ) - - # Keyword arguments for `model.generate` - gen_kwargs = generating_args.to_dict() - gen_kwargs["eos_token_id"] = [ - tokenizer.eos_token_id - ] + tokenizer.additional_special_tokens_ids - gen_kwargs["pad_token_id"] = tokenizer.pad_token_id - gen_kwargs["logits_processor"] = get_logits_processor() - - # Training - if training_args.do_train: - train_result = trainer.train( - resume_from_checkpoint=training_args.resume_from_checkpoint - ) - trainer.save_model() - trainer.log_metrics("train", train_result.metrics) - trainer.save_metrics("train", train_result.metrics) - trainer.save_state() - if trainer.is_world_process_zero() and finetuning_args.plot_loss: - plot_loss(training_args.output_dir, keys=["loss", "eval_loss"]) - - # Evaluation - if training_args.do_eval: - metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs) - if ( - training_args.predict_with_generate - ): # eval_loss will be wrong if predict_with_generate is enabled - metrics.pop("eval_loss", None) - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - # Predict - if training_args.do_predict: - predict_results = trainer.predict( - dataset, metric_key_prefix="predict", **gen_kwargs - ) - if ( - training_args.predict_with_generate - ): # predict_loss will be wrong if predict_with_generate is enabled - predict_results.metrics.pop("predict_loss", None) - trainer.log_metrics("predict", predict_results.metrics) - trainer.save_metrics("predict", predict_results.metrics) - trainer.save_predictions(predict_results) - - # Create model card - create_modelcard_and_push( - trainer, model_args, data_args, training_args, finetuning_args - ) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index ac56289c..5f691225 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -14,7 +14,6 @@ from .ppo import run_ppo from .pt import run_pt from .rm import run_rm from .sft import run_sft -from .sftmm import run_sft_mm if TYPE_CHECKING: from transformers import TrainerCallback @@ -30,8 +29,6 @@ def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["Tra run_pt(model_args, data_args, training_args, finetuning_args, callbacks) elif finetuning_args.stage == "sft": run_sft(model_args, data_args, training_args, finetuning_args, generating_args, callbacks) - elif finetuning_args.stage == "sft_mm": - run_sft_mm(model_args, data_args, training_args, finetuning_args, generating_args, callbacks) elif finetuning_args.stage == "rm": run_rm(model_args, data_args, training_args, finetuning_args, callbacks) elif finetuning_args.stage == "ppo": From 1dcabafe72fe21c7f9122a6bc1a1ccc4f5d08fdd Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 21:15:16 +0800 Subject: [PATCH 12/34] modify style --- src/llmtuner/data/__init__.py | 1 + src/llmtuner/data/aligner.py | 49 ++----- src/llmtuner/data/loader.py | 59 +++------ src/llmtuner/data/parser.py | 28 +--- src/llmtuner/data/preprocess.py | 90 +++---------- src/llmtuner/data/template.py | 188 +++++++-------------------- src/llmtuner/hparams/model_args.py | 101 ++++---------- src/llmtuner/model/__init__.py | 1 + src/llmtuner/model/adapter.py | 82 +++--------- src/llmtuner/model/loader.py | 21 ++- src/llmtuner/train/sft/workflow.py | 50 ++----- src/llmtuner/train/sftmm/__init__.py | 4 + src/llmtuner/train/sftmm/metric.py | 61 +++++++++ src/llmtuner/train/sftmm/trainer.py | 39 ++++++ src/llmtuner/train/sftmm/workflow.py | 101 
++++++++++++++ src/llmtuner/train/tuner.py | 1 + 16 files changed, 374 insertions(+), 502 deletions(-) create mode 100644 src/llmtuner/train/sftmm/__init__.py create mode 100644 src/llmtuner/train/sftmm/metric.py create mode 100644 src/llmtuner/train/sftmm/trainer.py create mode 100644 src/llmtuner/train/sftmm/workflow.py diff --git a/src/llmtuner/data/__init__.py b/src/llmtuner/data/__init__.py index 00a82d73..792e89d9 100644 --- a/src/llmtuner/data/__init__.py +++ b/src/llmtuner/data/__init__.py @@ -3,6 +3,7 @@ from .loader import get_dataset from .template import Template, get_template_and_fix_tokenizer, templates from .utils import Role, split_dataset + __all__ = [ "PairwiseDataCollatorWithPadding", "get_dataset", diff --git a/src/llmtuner/data/aligner.py b/src/llmtuner/data/aligner.py index 85202ea8..9d440aff 100644 --- a/src/llmtuner/data/aligner.py +++ b/src/llmtuner/data/aligner.py @@ -13,9 +13,7 @@ if TYPE_CHECKING: from .parser import DatasetAttr -def convert_alpaca( - examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" -) -> Dict[str, List[Any]]: +def convert_alpaca(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: outputs = {"prompt": [], "response": [], "system": [], "tools": []} for i in range(len(examples[dataset_attr.prompt])): prompt = [] @@ -33,16 +31,11 @@ def convert_alpaca( prompt.append({"role": Role.USER.value, "content": "\n".join(content)}) - if dataset_attr.response and isinstance( - examples[dataset_attr.response][i], list - ): + if dataset_attr.response and isinstance(examples[dataset_attr.response][i], list): response = [ - {"role": Role.ASSISTANT.value, "content": content} - for content in examples[dataset_attr.response][i] + {"role": Role.ASSISTANT.value, "content": content} for content in examples[dataset_attr.response][i] ] - elif dataset_attr.response and isinstance( - examples[dataset_attr.response][i], str - ): + elif dataset_attr.response and isinstance(examples[dataset_attr.response][i], str): response = [ { "role": Role.ASSISTANT.value, @@ -54,17 +47,13 @@ def convert_alpaca( outputs["prompt"].append(prompt) outputs["response"].append(response) - outputs["system"].append( - examples[dataset_attr.system][i] if dataset_attr.system else "" - ) + outputs["system"].append(examples[dataset_attr.system][i] if dataset_attr.system else "") outputs["tools"].append("") outputs["images"].append([]) return outputs -def convert_sharegpt( - examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" -) -> Dict[str, List[Any]]: +def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: outputs = {"prompt": [], "response": [], "system": [], "tools": []} tag_mapping = { dataset_attr.user_tag: Role.USER.value, @@ -77,10 +66,7 @@ def convert_sharegpt( even_tags = (dataset_attr.assistant_tag, dataset_attr.function_tag) accept_tags = (odd_tags, even_tags) for i, messages in enumerate(examples[dataset_attr.messages]): - if ( - dataset_attr.system_tag - and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag - ): + if dataset_attr.system_tag and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag: system = messages[0][dataset_attr.content_tag] messages = messages[1:] else: @@ -105,17 +91,13 @@ def convert_sharegpt( outputs["prompt"].append(aligned_messages[:-1]) outputs["response"].append(aligned_messages[-1:]) outputs["system"].append(system) - outputs["tools"].append( - examples[dataset_attr.tools][i] if dataset_attr.tools else "" - ) + 
outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "") outputs["images"].append([]) return outputs -def convert_llava( - examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" -) -> Dict[str, List[Any]]: +def convert_llava(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: outputs = {"prompt": [], "response": [], "system": [], "tools": [], "images": []} tag_mapping = { dataset_attr.user_tag: Role.USER.value, @@ -128,10 +110,7 @@ def convert_llava( even_tags = (dataset_attr.assistant_tag, dataset_attr.function_tag) accept_tags = (odd_tags, even_tags) for i, messages in enumerate(examples[dataset_attr.messages]): - if ( - dataset_attr.system_tag - and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag - ): + if dataset_attr.system_tag and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag: system = messages[0][dataset_attr.content_tag] messages = messages[1:] else: @@ -156,13 +135,9 @@ def convert_llava( outputs["prompt"].append(aligned_messages[:-1]) outputs["response"].append(aligned_messages[-1:]) outputs["system"].append(system) - outputs["tools"].append( - examples[dataset_attr.tools][i] if dataset_attr.tools else "" - ) + outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "") print(examples[dataset_attr.images][i]) - outputs["images"].append( - examples[dataset_attr.images][i] if dataset_attr.images else [] - ) + outputs["images"].append(examples[dataset_attr.images][i] if dataset_attr.images else []) return outputs diff --git a/src/llmtuner/data/loader.py b/src/llmtuner/data/loader.py index c373e196..fa4aa9c1 100644 --- a/src/llmtuner/data/loader.py +++ b/src/llmtuner/data/loader.py @@ -1,6 +1,6 @@ import inspect import os -from typing import TYPE_CHECKING, Literal, Union, Optional +from typing import TYPE_CHECKING, Literal, Optional, Union from datasets import load_dataset, load_from_disk @@ -13,9 +13,10 @@ from .preprocess import get_preprocess_and_print_func from .template import get_template_and_fix_tokenizer from .utils import checksum, merge_dataset + if TYPE_CHECKING: from datasets import Dataset, IterableDataset - from transformers import Seq2SeqTrainingArguments, AutoProcessor + from transformers import AutoProcessor, Seq2SeqTrainingArguments from transformers.tokenization_utils import PreTrainedTokenizer from ..hparams import DataArguments, ModelArguments @@ -78,20 +79,14 @@ def load_single_dataset( split=data_args.split, cache_dir=cache_dir, token=model_args.ms_hub_token, - use_streaming=( - data_args.streaming and (dataset_attr.load_from != "file") - ), + use_streaming=(data_args.streaming and (dataset_attr.load_from != "file")), ) if isinstance(dataset, MsDataset): dataset = dataset.to_hf_dataset() except ImportError: - raise ImportError( - "Please install modelscope via `pip install modelscope -U`" - ) + raise ImportError("Please install modelscope via `pip install modelscope -U`") else: - if ( - "trust_remote_code" in inspect.signature(load_dataset).parameters - ): # for datasets==2.16.0 + if "trust_remote_code" in inspect.signature(load_dataset).parameters: # for datasets==2.16.0 kwargs = {"trust_remote_code": True} else: kwargs = {} @@ -108,9 +103,7 @@ def load_single_dataset( **kwargs, ) - if data_args.streaming and ( - dataset_attr.load_from == "file" - ): # faster than specifying streaming=True + if data_args.streaming and (dataset_attr.load_from == "file"): # faster than specifying streaming=True dataset = dataset.to_iterable_dataset() # 
TODO: add num shards parameter if data_args.max_samples is not None: # truncate dataset @@ -135,13 +128,9 @@ def get_dataset( # Load tokenized dataset if data_args.tokenized_path is not None: if has_tokenized_data(data_args.tokenized_path): - logger.warning( - "Loading dataset from disk will ignore other data arguments." - ) + logger.warning("Loading dataset from disk will ignore other data arguments.") dataset = load_from_disk(data_args.tokenized_path) - logger.info( - "Loaded tokenized dataset from {}.".format(data_args.tokenized_path) - ) + logger.info("Loaded tokenized dataset from {}.".format(data_args.tokenized_path)) if data_args.streaming: dataset = dataset.to_iterable_dataset() return dataset @@ -152,16 +141,10 @@ def get_dataset( with training_args.main_process_first(desc="load dataset"): all_datasets = [] for dataset_attr in get_dataset_list(data_args): - if (stage == "rm" and dataset_attr.ranking is False) or ( - stage != "rm" and dataset_attr.ranking is True - ): - raise ValueError( - "The dataset is not applicable in the current training stage." - ) + if (stage == "rm" and dataset_attr.ranking is False) or (stage != "rm" and dataset_attr.ranking is True): + raise ValueError("The dataset is not applicable in the current training stage.") - all_datasets.append( - load_single_dataset(dataset_attr, model_args, data_args) - ) + all_datasets.append(load_single_dataset(dataset_attr, model_args, data_args)) dataset = merge_dataset(all_datasets, data_args, training_args) with training_args.main_process_first(desc="pre-process dataset"): @@ -177,21 +160,13 @@ def get_dataset( desc="Running tokenizer on dataset", ) - dataset = dataset.map( - preprocess_func, batched=True, remove_columns=column_names, **kwargs - ) + dataset = dataset.map(preprocess_func, batched=True, remove_columns=column_names, **kwargs) if data_args.tokenized_path is not None: if training_args.should_save: dataset.save_to_disk(data_args.tokenized_path) - logger.info( - "Tokenized dataset saved at {}.".format(data_args.tokenized_path) - ) - logger.info( - "Please restart the training with `--tokenized_path {}`.".format( - data_args.tokenized_path - ) - ) + logger.info("Tokenized dataset saved at {}.".format(data_args.tokenized_path)) + logger.info("Please restart the training with `--tokenized_path {}`.".format(data_args.tokenized_path)) exit(0) @@ -199,8 +174,6 @@ def get_dataset( try: print_function(next(iter(dataset))) except StopIteration: - raise RuntimeError( - "Cannot find valid samples, check `data/README.md` for the data format." 
- ) + raise RuntimeError("Cannot find valid samples, check `data/README.md` for the data format.") return dataset diff --git a/src/llmtuner/data/parser.py b/src/llmtuner/data/parser.py index 79d6ed4e..4d3d7741 100644 --- a/src/llmtuner/data/parser.py +++ b/src/llmtuner/data/parser.py @@ -50,9 +50,7 @@ class DatasetAttr: def __repr__(self) -> str: return self.dataset_name - def set_attr( - self, key: str, obj: Dict[str, Any], default: Optional[Any] = None - ) -> None: + def set_attr(self, key: str, obj: Dict[str, Any], default: Optional[Any] = None) -> None: setattr(self, key, obj.get(key, default)) @@ -71,16 +69,12 @@ def get_dataset_list(data_args: "DataArguments") -> List["DatasetAttr"]: except Exception as err: if len(dataset_names) != 0: raise ValueError( - "Cannot open {} due to {}.".format( - os.path.join(data_args.dataset_dir, DATA_CONFIG), str(err) - ) + "Cannot open {} due to {}.".format(os.path.join(data_args.dataset_dir, DATA_CONFIG), str(err)) ) dataset_info = None if data_args.interleave_probs is not None: - data_args.interleave_probs = [ - float(prob.strip()) for prob in data_args.interleave_probs.split(",") - ] + data_args.interleave_probs = [float(prob.strip()) for prob in data_args.interleave_probs.split(",")] dataset_list: List[DatasetAttr] = [] for name in dataset_names: @@ -98,21 +92,13 @@ def get_dataset_list(data_args: "DataArguments") -> List["DatasetAttr"]: if has_hf_url or has_ms_url: if (use_modelscope() and has_ms_url) or (not has_hf_url): - dataset_attr = DatasetAttr( - "ms_hub", dataset_name=dataset_info[name]["ms_hub_url"] - ) + dataset_attr = DatasetAttr("ms_hub", dataset_name=dataset_info[name]["ms_hub_url"]) else: - dataset_attr = DatasetAttr( - "hf_hub", dataset_name=dataset_info[name]["hf_hub_url"] - ) + dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"]) elif "script_url" in dataset_info[name]: - dataset_attr = DatasetAttr( - "script", dataset_name=dataset_info[name]["script_url"] - ) + dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"]) else: - dataset_attr = DatasetAttr( - "file", dataset_name=dataset_info[name]["file_name"] - ) + dataset_attr = DatasetAttr("file", dataset_name=dataset_info[name]["file_name"]) dataset_attr.set_attr("file_sha1", dataset_info[name]) dataset_attr.set_attr("subset", dataset_info[name]) diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index dc72483f..1c8c64a6 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -1,6 +1,6 @@ from functools import partial from itertools import chain -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Tuple, Optional +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Optional, Tuple from ..extras.constants import IGNORE_INDEX from ..extras.logging import get_logger @@ -9,7 +9,7 @@ from .utils import Role if TYPE_CHECKING: from transformers import Seq2SeqTrainingArguments - from transformers.tokenization_utils import PreTrainedTokenizer, AutoProcessor + from transformers.tokenization_utils import AutoProcessor, PreTrainedTokenizer from ..hparams import DataArguments from .template import Template @@ -24,22 +24,16 @@ def preprocess_pretrain_dataset( data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build grouped texts with format `X1 X2 X3 ...` if packing is enabled - text_examples = [ - messages[0]["content"] + tokenizer.eos_token for messages in examples["prompt"] - ] + text_examples = [messages[0]["content"] + 
tokenizer.eos_token for messages in examples["prompt"]] if not data_args.packing: if data_args.template == "gemma": text_examples = [tokenizer.bos_token + example for example in text_examples] - result = tokenizer( - text_examples, add_special_tokens=False, max_length=data_args.cutoff_len - ) + result = tokenizer(text_examples, add_special_tokens=False, max_length=data_args.cutoff_len) else: tokenized_examples = tokenizer(text_examples, add_special_tokens=False) - concatenated_examples = { - k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys() - } + concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()} total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]]) block_size = data_args.cutoff_len total_length = (total_length // block_size) * block_size @@ -87,9 +81,7 @@ def preprocess_supervised_dataset( if data_args.train_on_prompt: source_mask = source_ids elif turn_idx != 0 and template.efficient_eos: - source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * ( - len(source_ids) - 1 - ) + source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1) else: source_mask = [IGNORE_INDEX] * len(source_ids) @@ -128,9 +120,7 @@ def preprocess_packed_supervised_dataset( if data_args.train_on_prompt: source_mask = source_ids elif len(input_ids) != 0 and template.efficient_eos: - source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * ( - len(source_ids) - 1 - ) + source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1) else: source_mask = [IGNORE_INDEX] * len(source_ids) @@ -190,9 +180,7 @@ def preprocess_multimodal_supervised_dataset( if data_args.train_on_prompt: source_mask = source_ids elif turn_idx != 0 and template.efficient_eos: - source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * ( - len(source_ids) - 1 - ) + source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1) else: source_mask = [IGNORE_INDEX] * len(source_ids) @@ -206,9 +194,7 @@ def preprocess_multimodal_supervised_dataset( model_inputs["input_ids"].append(input_ids) model_inputs["attention_mask"].append([1] * len(input_ids)) model_inputs["labels"].append(labels) - pixel_values = processor.image_processor( - examples["images"][0], return_tensors="pt" - )["pixel_values"][0] + pixel_values = processor.image_processor(examples["images"][0], return_tensors="pt")["pixel_values"][0] model_inputs["pixel_values"].append(pixel_values) return model_inputs @@ -229,9 +215,7 @@ def preprocess_unsupervised_dataset( if len(examples["response"][i]) == 1: messages = examples["prompt"][i] + examples["response"][i] else: - messages = examples["prompt"][i] + [ - {"role": Role.ASSISTANT.value, "content": ""} - ] + messages = examples["prompt"][i] + [{"role": Role.ASSISTANT.value, "content": ""}] input_ids, labels = template.encode_oneturn( tokenizer, @@ -294,15 +278,9 @@ def preprocess_pairwise_dataset( return model_inputs -def print_supervised_dataset_example( - example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer" -) -> None: +def print_supervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None: print("input_ids:\n{}".format(example["input_ids"])) - print( - "inputs:\n{}".format( - tokenizer.decode(example["input_ids"], skip_special_tokens=False) - ) - ) + print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False))) print("label_ids:\n{}".format(example["labels"])) print( "labels:\n{}".format( @@ -314,38 
+292,18 @@ def print_supervised_dataset_example( ) -def print_pairwise_dataset_example( - example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer" -) -> None: +def print_pairwise_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None: print("prompt_ids:\n{}".format(example["prompt_ids"])) - print( - "prompt:\n{}".format( - tokenizer.decode(example["prompt_ids"], skip_special_tokens=False) - ) - ) + print("prompt:\n{}".format(tokenizer.decode(example["prompt_ids"], skip_special_tokens=False))) print("chosen_ids:\n{}".format(example["chosen_ids"])) - print( - "chosen:\n{}".format( - tokenizer.decode(example["chosen_ids"], skip_special_tokens=False) - ) - ) + print("chosen:\n{}".format(tokenizer.decode(example["chosen_ids"], skip_special_tokens=False))) print("rejected_ids:\n{}".format(example["rejected_ids"])) - print( - "rejected:\n{}".format( - tokenizer.decode(example["rejected_ids"], skip_special_tokens=False) - ) - ) + print("rejected:\n{}".format(tokenizer.decode(example["rejected_ids"], skip_special_tokens=False))) -def print_unsupervised_dataset_example( - example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer" -) -> None: +def print_unsupervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None: print("input_ids:\n{}".format(example["input_ids"])) - print( - "inputs:\n{}".format( - tokenizer.decode(example["input_ids"], skip_special_tokens=False) - ) - ) + print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False))) def get_preprocess_and_print_func( @@ -357,12 +315,8 @@ def get_preprocess_and_print_func( processor: Optional["AutoProcessor"] = None, ) -> Tuple[Callable, Callable]: if stage == "pt": - preprocess_func = partial( - preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args - ) - print_function = partial( - print_unsupervised_dataset_example, tokenizer=tokenizer - ) + preprocess_func = partial(preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args) + print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) elif stage == "sft" and not training_args.predict_with_generate: if data_args.packing: preprocess_func = partial( @@ -402,8 +356,6 @@ def get_preprocess_and_print_func( template=template, data_args=data_args, ) - print_function = partial( - print_unsupervised_dataset_example, tokenizer=tokenizer - ) + print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) return preprocess_func, print_function diff --git a/src/llmtuner/data/template.py b/src/llmtuner/data/template.py index e6cdadd6..cf21e932 100644 --- a/src/llmtuner/data/template.py +++ b/src/llmtuner/data/template.py @@ -42,9 +42,7 @@ class Template: r""" Returns a single pair of token ids representing prompt and response respectively. """ - encoded_pairs = self._encode( - tokenizer, messages, system, tools, cutoff_len, reserved_label_len - ) + encoded_pairs = self._encode(tokenizer, messages, system, tools, cutoff_len, reserved_label_len) prompt_ids = [] for query_ids, resp_ids in encoded_pairs[:-1]: prompt_ids += query_ids + resp_ids @@ -64,9 +62,7 @@ class Template: r""" Returns multiple pairs of token ids representing prompts and responses respectively. 
""" - return self._encode( - tokenizer, messages, system, tools, cutoff_len, reserved_label_len - ) + return self._encode(tokenizer, messages, system, tools, cutoff_len, reserved_label_len) def _encode( self, @@ -93,9 +89,7 @@ class Template: elements += self.format_separator.apply() if message["role"] == Role.USER.value: - elements += self.format_user.apply( - content=message["content"], idx=str(i // 2) - ) + elements += self.format_user.apply(content=message["content"], idx=str(i // 2)) elif message["role"] == Role.ASSISTANT.value: elements += self.format_assistant.apply(content=message["content"]) elif message["role"] == Role.OBSERVATION.value: @@ -130,11 +124,7 @@ class Template: elif "eos_token" in elem and tokenizer.eos_token_id is not None: token_ids += [tokenizer.eos_token_id] else: - raise ValueError( - "Input must be string, set[str] or dict[str, str], got {}".format( - type(elem) - ) - ) + raise ValueError("Input must be string, set[str] or dict[str, str], got {}".format(type(elem))) return token_ids @@ -192,9 +182,7 @@ class Llama2Template(Template): elements += self.format_separator.apply() if message["role"] == Role.USER.value: - elements += self.format_user.apply( - content=system_text + message["content"] - ) + elements += self.format_user.apply(content=system_text + message["content"]) elif message["role"] == Role.ASSISTANT.value: elements += self.format_assistant.apply(content=message["content"]) elif message["role"] == Role.OBSERVATION.value: @@ -257,9 +245,7 @@ def _register_template( template_class = Llama2Template if name.startswith("llama2") else Template default_user_formatter = StringFormatter(slots=["{{content}}"]) default_assistant_formatter = StringFormatter(slots=["{{content}}"] + eos_slots) - default_function_formatter = FunctionFormatter( - slots=["Action: {{name}}\nAction Input: {{arguments}}"] + eos_slots - ) + default_function_formatter = FunctionFormatter(slots=["Action: {{name}}\nAction Input: {{arguments}}"] + eos_slots) default_tool_formatter = ToolFormatter(tool_format="default") default_separator_formatter = EmptyFormatter() templates[name] = template_class( @@ -295,9 +281,7 @@ def _jinja_escape(content: str) -> str: return content.replace("\n", r"\n").replace("'", r"\'") -def _convert_slots_to_jinja( - slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content" -) -> str: +def _convert_slots_to_jinja(slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content") -> str: slot_items = [] for slot in slots: if isinstance(slot, str): @@ -311,9 +295,7 @@ def _convert_slots_to_jinja( elif isinstance(slot, set): if "bos_token" in slot: slot_items.append("'" + tokenizer.bos_token + "'") - elif ( - "eos_token" in slot - ): # do not use {{ eos_token }} since it may be replaced + elif "eos_token" in slot: # do not use {{ eos_token }} since it may be replaced slot_items.append("'" + tokenizer.eos_token + "'") elif isinstance(slot, dict): raise ValueError("Dict is not supported.") @@ -325,37 +307,25 @@ def _get_jinja_template(template: "Template", tokenizer: "PreTrainedTokenizer") jinja_template = "" if template.default_system: - jinja_template += ( - "{% set system_message = '" - + _jinja_escape(template.default_system) - + "' %}" - ) + jinja_template += "{% set system_message = '" + _jinja_escape(template.default_system) + "' %}" jinja_template += ( - "{% if messages[0]['role'] == 'system' %}" - "{% set system_message = messages[0]['content'] %}" - "{% endif %}" + "{% if messages[0]['role'] == 'system' %}" "{% set 
system_message = messages[0]['content'] %}" "{% endif %}" ) - system_message = _convert_slots_to_jinja( - template.format_system.apply(), tokenizer, placeholder="system_message" - ) + system_message = _convert_slots_to_jinja(template.format_system.apply(), tokenizer, placeholder="system_message") if isinstance(template, Llama2Template): pass elif template.force_system: jinja_template += "{{ " + system_message + " }}" else: - jinja_template += ( - "{% if system_message is defined %}{{ " + system_message + " }}{% endif %}" - ) + jinja_template += "{% if system_message is defined %}{{ " + system_message + " }}{% endif %}" jinja_template += "{% for message in messages %}" jinja_template += "{% set content = message['content'] %}" if isinstance(template, Llama2Template): jinja_template += "{% if loop.index0 == 0 and system_message is defined %}" - jinja_template += ( - "{% set content = " + system_message + " + message['content'] %}" - ) + jinja_template += "{% set content = " + system_message + " + message['content'] %}" jinja_template += "{% endif %}" jinja_template += "{% if message['role'] == 'user' %}" user_message = _convert_slots_to_jinja(template.format_user.apply(), tokenizer) @@ -403,9 +373,7 @@ def get_template_and_fix_tokenizer( ) logger.info("Add {} to stop words.".format(",".join(stop_words))) if num_added_tokens > 0: - logger.warning( - "New tokens have been added, make sure `resize_vocab` is True." - ) + logger.warning("New tokens have been added, make sure `resize_vocab` is True.") try: tokenizer.chat_template = _get_jinja_template(template, tokenizer) @@ -417,9 +385,7 @@ def get_template_and_fix_tokenizer( _register_template( name="alpaca", - format_user=StringFormatter( - slots=["### Instruction:\n{{content}}\n\n### Response:\n"] - ), + format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n\n### Response:\n"]), format_separator=EmptyFormatter(slots=["\n\n"]), default_system=( "Below is an instruction that describes a task. 
" @@ -458,9 +424,7 @@ _register_template( _register_template( name="baichuan", - format_user=StringFormatter( - slots=[{"token": ""}, "{{content}}", {"token": ""}] - ), + format_user=StringFormatter(slots=[{"token": ""}, "{{content}}", {"token": ""}]), efficient_eos=True, ) @@ -483,9 +447,7 @@ _register_template( _register_template( name="bluelm", - format_user=StringFormatter( - slots=[{"token": "[|Human|]:"}, "{{content}}", {"token": "[|AI|]:"}] - ), + format_user=StringFormatter(slots=[{"token": "[|Human|]:"}, "{{content}}", {"token": "[|AI|]:"}]), ) @@ -504,9 +466,7 @@ _register_template( _register_template( name="chatglm2", format_user=StringFormatter(slots=["[Round {{idx}}]\n\n问:{{content}}\n\n答:"]), - format_system=StringFormatter( - slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"] - ), + format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), format_separator=EmptyFormatter(slots=["\n\n"]), efficient_eos=True, force_system=True, @@ -515,13 +475,9 @@ _register_template( _register_template( name="chatglm3", - format_user=StringFormatter( - slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] - ), + format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), format_assistant=StringFormatter(slots=["\n", "{{content}}"]), - format_system=StringFormatter( - slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"] - ), + format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), format_function=FunctionFormatter(slots=["{{name}}\n{{arguments}}"]), format_observation=StringFormatter( slots=[ @@ -539,9 +495,7 @@ _register_template( _register_template( name="chatglm3_system", - format_user=StringFormatter( - slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] - ), + format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), format_assistant=StringFormatter(slots=["\n", "{{content}}"]), format_system=StringFormatter( slots=[ @@ -572,15 +526,9 @@ _register_template( _register_template( name="chatml", - format_user=StringFormatter( - slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), - format_system=StringFormatter( - slots=["<|im_start|>system\n{{content}}<|im_end|>\n"] - ), - format_observation=StringFormatter( - slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), + format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), + format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), format_separator=EmptyFormatter(slots=["\n"]), stop_words=["<|im_end|>", "<|im_start|>"], replace_eos=True, @@ -589,15 +537,9 @@ _register_template( _register_template( name="chatml_de", - format_user=StringFormatter( - slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), - format_system=StringFormatter( - slots=["<|im_start|>system\n{{content}}<|im_end|>\n"] - ), - format_observation=StringFormatter( - slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), + format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), + 
format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), format_separator=EmptyFormatter(slots=["\n"]), default_system="Du bist ein freundlicher und hilfsbereiter KI-Assistent.", stop_words=["<|im_end|>", "<|im_start|>"], @@ -607,9 +549,7 @@ _register_template( _register_template( name="codegeex2", - format_system=StringFormatter( - slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"] - ), + format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), force_system=True, ) @@ -639,15 +579,9 @@ _register_template( _register_template( name="dbrx", - format_user=StringFormatter( - slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), - format_system=StringFormatter( - slots=["<|im_start|>system\n{{content}}<|im_end|>\n"] - ), - format_observation=StringFormatter( - slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), + format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), + format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), format_separator=EmptyFormatter(slots=["\n"]), default_system=( "You are DBRX, created by Databricks. You were last updated in December 2023. " @@ -725,9 +659,7 @@ _register_template( _register_template( name="gemma", - format_user=StringFormatter( - slots=["user\n{{content}}\nmodel\n"] - ), + format_user=StringFormatter(slots=["user\n{{content}}\nmodel\n"]), format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]), format_observation=StringFormatter( slots=["tool\n{{content}}\nmodel\n"] @@ -740,9 +672,7 @@ _register_template( _register_template( name="intern", - format_user=StringFormatter( - slots=["<|User|>:{{content}}", {"token": ""}, "\n<|Bot|>:"] - ), + format_user=StringFormatter(slots=["<|User|>:{{content}}", {"token": ""}, "\n<|Bot|>:"]), format_separator=EmptyFormatter(slots=[{"token": ""}, "\n"]), stop_words=[""], efficient_eos=True, @@ -751,12 +681,8 @@ _register_template( _register_template( name="intern2", - format_user=StringFormatter( - slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), - format_system=StringFormatter( - slots=[{"bos_token"}, "<|im_start|>system\n{{content}}<|im_end|>\n"] - ), + format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_system=StringFormatter(slots=[{"bos_token"}, "<|im_start|>system\n{{content}}<|im_end|>\n"]), format_separator=EmptyFormatter(slots=["\n"]), default_system=( "You are an AI assistant whose name is InternLM (书生·浦语).\n" @@ -859,9 +785,7 @@ _register_template( _register_template( name="orion", - format_user=StringFormatter( - slots=["Human: {{content}}\n\nAssistant: ", {"eos_token"}] - ), + format_user=StringFormatter(slots=["Human: {{content}}\n\nAssistant: ", {"eos_token"}]), format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]), force_system=True, ) @@ -869,15 +793,9 @@ _register_template( _register_template( name="phi", - format_user=StringFormatter( - slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>\n"] - ), - format_system=StringFormatter( - slots=[{"bos_token"}, "<|system|>\n{{content}}<|end|>\n"] - ), - format_observation=StringFormatter( - slots=["<|function_output|>\n{{content}}<|end|>\n<|assistant|>\n"] - ), + 
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>\n"]), + format_system=StringFormatter(slots=[{"bos_token"}, "<|system|>\n{{content}}<|end|>\n"]), + format_observation=StringFormatter(slots=["<|function_output|>\n{{content}}<|end|>\n<|assistant|>\n"]), format_separator=EmptyFormatter(slots=["\n"]), default_system="You are a helpful AI assistant.", stop_words=["<|end|>"], @@ -887,15 +805,9 @@ _register_template( _register_template( name="qwen", - format_user=StringFormatter( - slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), - format_system=StringFormatter( - slots=["<|im_start|>system\n{{content}}<|im_end|>\n"] - ), - format_observation=StringFormatter( - slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), + format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), + format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), + format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), format_separator=EmptyFormatter(slots=["\n"]), default_system="You are a helpful assistant.", stop_words=["<|im_end|>"], @@ -951,12 +863,8 @@ _register_template( _register_template( name="yayi", - format_user=StringFormatter( - slots=[{"token": "<|Human|>"}, ":\n{{content}}\n\n", {"token": "<|YaYi|>"}, ":"] - ), - format_system=StringFormatter( - slots=[{"token": "<|System|>"}, ":\n{{content}}\n\n"] - ), + format_user=StringFormatter(slots=[{"token": "<|Human|>"}, ":\n{{content}}\n\n", {"token": "<|YaYi|>"}, ":"]), + format_system=StringFormatter(slots=[{"token": "<|System|>"}, ":\n{{content}}\n\n"]), format_separator=EmptyFormatter(slots=["\n\n"]), default_system=( "You are a helpful, respectful and honest assistant named YaYi " @@ -975,9 +883,7 @@ _register_template( _register_template( name="yi", - format_user=StringFormatter( - slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] - ), + format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), format_separator=EmptyFormatter(slots=["\n"]), stop_words=["<|im_end|>"], replace_eos=True, @@ -995,9 +901,7 @@ _register_template( _register_template( name="zephyr", - format_user=StringFormatter( - slots=["<|user|>\n{{content}}", {"eos_token"}, "<|assistant|>"] - ), + format_user=StringFormatter(slots=["<|user|>\n{{content}}", {"eos_token"}, "<|assistant|>"]), format_assistant=StringFormatter(slots=["\n{{content}}", {"eos_token"}]), format_system=StringFormatter(slots=["<|system|>\n{{content}}", {"eos_token"}]), default_system="You are a friendly chatbot who always responds in the style of a pirate", diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py index 66ac93cf..df1a5ec0 100644 --- a/src/llmtuner/hparams/model_args.py +++ b/src/llmtuner/hparams/model_args.py @@ -15,33 +15,23 @@ class ModelArguments: ) adapter_name_or_path: Optional[str] = field( default=None, - metadata={ - "help": "Path to the adapter weight or identifier from huggingface.co/models." - }, + metadata={"help": "Path to the adapter weight or identifier from huggingface.co/models."}, ) cache_dir: Optional[str] = field( default=None, - metadata={ - "help": "Where to store the pre-trained models downloaded from huggingface.co or modelscope.cn." 
- }, + metadata={"help": "Where to store the pre-trained models downloaded from huggingface.co or modelscope.cn."}, ) use_fast_tokenizer: bool = field( default=True, - metadata={ - "help": "Whether or not to use one of the fast tokenizer (backed by the tokenizers library)." - }, + metadata={"help": "Whether or not to use one of the fast tokenizer (backed by the tokenizers library)."}, ) resize_vocab: bool = field( default=False, - metadata={ - "help": "Whether or not to resize the tokenizer vocab and the embedding layers." - }, + metadata={"help": "Whether or not to resize the tokenizer vocab and the embedding layers."}, ) split_special_tokens: bool = field( default=False, - metadata={ - "help": "Whether or not the special tokens should be split during the tokenization process." - }, + metadata={"help": "Whether or not the special tokens should be split during the tokenization process."}, ) new_special_tokens: Optional[str] = field( default=None, @@ -49,9 +39,7 @@ class ModelArguments: ) model_revision: str = field( default="main", - metadata={ - "help": "The specific model version to use (can be a branch name, tag name or commit id)." - }, + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) low_cpu_mem_usage: bool = field( default=True, @@ -59,9 +47,7 @@ class ModelArguments: ) quantization_bit: Optional[int] = field( default=None, - metadata={ - "help": "The number of bits to quantize the model using bitsandbytes." - }, + metadata={"help": "The number of bits to quantize the model using bitsandbytes."}, ) quantization_type: Literal["fp4", "nf4"] = field( default="nf4", @@ -69,21 +55,15 @@ class ModelArguments: ) double_quantization: bool = field( default=True, - metadata={ - "help": "Whether or not to use double quantization in int4 training." - }, + metadata={"help": "Whether or not to use double quantization in int4 training."}, ) quantization_device_map: Optional[Literal["auto"]] = field( default=None, - metadata={ - "help": "Device map used to infer the 4-bit quantized model, needs bitsandbytes>=0.43.0." - }, + metadata={"help": "Device map used to infer the 4-bit quantized model, needs bitsandbytes>=0.43.0."}, ) rope_scaling: Optional[Literal["linear", "dynamic"]] = field( default=None, - metadata={ - "help": "Which scaling strategy should be adopted for the RoPE embeddings." - }, + metadata={"help": "Which scaling strategy should be adopted for the RoPE embeddings."}, ) flash_attn: Literal["off", "sdpa", "fa2", "auto"] = field( default="auto", @@ -91,27 +71,19 @@ class ModelArguments: ) shift_attn: bool = field( default=False, - metadata={ - "help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA." - }, + metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."}, ) mixture_of_depths: Optional[Literal["convert", "load"]] = field( default=None, - metadata={ - "help": "Convert the model to mixture-of-depths (MoD) or load the MoD model." - }, + metadata={"help": "Convert the model to mixture-of-depths (MoD) or load the MoD model."}, ) use_unsloth: bool = field( default=False, - metadata={ - "help": "Whether or not to use unsloth's optimization for the LoRA training." - }, + metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."}, ) moe_aux_loss_coef: Optional[float] = field( default=None, - metadata={ - "help": "Coefficient of the auxiliary router loss in mixture-of-experts model." 
- }, + metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."}, ) disable_gradient_checkpointing: bool = field( default=False, @@ -135,9 +107,7 @@ class ModelArguments: ) vllm_gpu_util: float = field( default=0.9, - metadata={ - "help": "The fraction of GPU memory in (0,1) to be used for the vLLM engine." - }, + metadata={"help": "The fraction of GPU memory in (0,1) to be used for the vLLM engine."}, ) vllm_enforce_eager: bool = field( default=False, @@ -177,9 +147,7 @@ class ModelArguments: ) export_quantization_dataset: Optional[str] = field( default=None, - metadata={ - "help": "Path to the dataset or dataset name to use in quantizing the exported model." - }, + metadata={"help": "Path to the dataset or dataset name to use in quantizing the exported model."}, ) export_quantization_nsamples: int = field( default=128, @@ -187,27 +155,19 @@ class ModelArguments: ) export_quantization_maxlen: int = field( default=1024, - metadata={ - "help": "The maximum length of the model inputs used for quantization." - }, + metadata={"help": "The maximum length of the model inputs used for quantization."}, ) export_legacy_format: bool = field( default=False, - metadata={ - "help": "Whether or not to save the `.bin` files instead of `.safetensors`." - }, + metadata={"help": "Whether or not to save the `.bin` files instead of `.safetensors`."}, ) export_hub_model_id: Optional[str] = field( default=None, - metadata={ - "help": "The name of the repository if push the model to the Hugging Face hub." - }, + metadata={"help": "The name of the repository if push the model to the Hugging Face hub."}, ) print_param_status: bool = field( default=False, - metadata={ - "help": "For debugging purposes, print the status of the parameters in the model." - }, + metadata={"help": "For debugging purposes, print the status of the parameters in the model."}, ) use_mllm: bool = field( default=False, @@ -220,21 +180,13 @@ class ModelArguments: self.model_max_length = None if self.split_special_tokens and self.use_fast_tokenizer: - raise ValueError( - "`split_special_tokens` is only supported for slow tokenizers." - ) + raise ValueError("`split_special_tokens` is only supported for slow tokenizers.") - if ( - self.adapter_name_or_path is not None - ): # support merging multiple lora weights - self.adapter_name_or_path = [ - path.strip() for path in self.adapter_name_or_path.split(",") - ] + if self.adapter_name_or_path is not None: # support merging multiple lora weights + self.adapter_name_or_path = [path.strip() for path in self.adapter_name_or_path.split(",")] if self.new_special_tokens is not None: # support multiple special tokens - self.new_special_tokens = [ - token.strip() for token in self.new_special_tokens.split(",") - ] + self.new_special_tokens = [token.strip() for token in self.new_special_tokens.split(",")] assert self.quantization_bit in [ None, @@ -249,10 +201,7 @@ class ModelArguments: 2, ], "We only accept 2/3/4/8-bit quantization." 
- if ( - self.export_quantization_bit is not None - and self.export_quantization_dataset is None - ): + if self.export_quantization_bit is not None and self.export_quantization_dataset is None: raise ValueError("Quantization dataset is necessary for exporting.") def to_dict(self) -> Dict[str, Any]: diff --git a/src/llmtuner/model/__init__.py b/src/llmtuner/model/__init__.py index db81e1dc..1824f084 100644 --- a/src/llmtuner/model/__init__.py +++ b/src/llmtuner/model/__init__.py @@ -1,6 +1,7 @@ from .loader import load_config, load_model, load_tokenizer from .utils.misc import find_all_linear_modules, load_valuehead_params + __all__ = [ "load_config", "load_model", diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py index e65798b7..f3db4d1e 100644 --- a/src/llmtuner/model/adapter.py +++ b/src/llmtuner/model/adapter.py @@ -38,9 +38,7 @@ def init_adapter( logger.info("Adapter is not found at evaluation, load the base model.") return model - if finetuning_args.finetuning_type != "lora" and getattr( - model, "quantization_method", None - ): + if finetuning_args.finetuning_type != "lora" and getattr(model, "quantization_method", None): raise ValueError("You can only use lora for quantized models.") if finetuning_args.finetuning_type == "full" and is_trainable: @@ -68,12 +66,8 @@ def init_adapter( stride = num_layers // finetuning_args.num_layer_trainable trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride) - elif ( - finetuning_args.num_layer_trainable > 0 - ): # fine-tuning the last n layers if num_layer_trainable > 0 - trainable_layer_ids = range( - num_layers - finetuning_args.num_layer_trainable, num_layers - ) + elif finetuning_args.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0 + trainable_layer_ids = range(num_layers - finetuning_args.num_layer_trainable, num_layers) else: # fine-tuning the first n layers if num_layer_trainable < 0 trainable_layer_ids = range(-finetuning_args.num_layer_trainable) @@ -88,15 +82,11 @@ def init_adapter( for module_name in finetuning_args.name_module_trainable: if module_name not in freeze_modules: raise ValueError( - "Module {} is not found, please choose from {}".format( - module_name, ", ".join(freeze_modules) - ) + "Module {} is not found, please choose from {}".format(module_name, ", ".join(freeze_modules)) ) for idx in trainable_layer_ids: - trainable_layers.append( - ".{:d}.{}".format(idx, module_name if module_name != "all" else "") - ) + trainable_layers.append(".{:d}.{}".format(idx, module_name if module_name != "all" else "")) for name, param in model.named_parameters(): if any(trainable_layer in name for trainable_layer in trainable_layers): @@ -105,43 +95,27 @@ def init_adapter( else: param.requires_grad_(False) - logger.info( - "Set trainable layers: {}".format(",".join(map(str, trainable_layer_ids))) - ) + logger.info("Set trainable layers: {}".format(",".join(map(str, trainable_layer_ids)))) if finetuning_args.finetuning_type == "lora": - logger.info( - "Fine-tuning method: {}".format( - "DoRA" if finetuning_args.use_dora else "LoRA" - ) - ) + logger.info("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA")) adapter_to_resume = None if model_args.adapter_name_or_path is not None: is_mergeable = True - if getattr( - model, "quantization_method", None - ): # merge lora in quantized model is unstable - assert ( - len(model_args.adapter_name_or_path) == 1 - ), "Quantized model only accepts a single adapter." 
+ if getattr(model, "quantization_method", None): # merge lora in quantized model is unstable + assert len(model_args.adapter_name_or_path) == 1, "Quantized model only accepts a single adapter." is_mergeable = False if is_deepspeed_zero3_enabled(): - assert ( - len(model_args.adapter_name_or_path) == 1 - ), "Cannot use multiple adapters in DeepSpeed ZeRO-3." + assert len(model_args.adapter_name_or_path) == 1, "Cannot use multiple adapters in DeepSpeed ZeRO-3." is_mergeable = False if model_args.use_unsloth: - assert ( - len(model_args.adapter_name_or_path) == 1 - ), "Unsloth model only accepts a single adapter." + assert len(model_args.adapter_name_or_path) == 1, "Unsloth model only accepts a single adapter." is_mergeable = False - if (is_trainable and not finetuning_args.create_new_adapter) or ( - not is_mergeable - ): + if (is_trainable and not finetuning_args.create_new_adapter) or (not is_mergeable): adapter_to_merge = model_args.adapter_name_or_path[:-1] adapter_to_resume = model_args.adapter_name_or_path[-1] else: @@ -158,9 +132,7 @@ def init_adapter( if adapter_to_resume is not None: # resume lora training if model_args.use_unsloth: - model = load_unsloth_peft_model( - config, model_args, is_trainable=is_trainable - ) + model = load_unsloth_peft_model(config, model_args, is_trainable=is_trainable) else: model = PeftModel.from_pretrained( model, @@ -169,27 +141,19 @@ def init_adapter( offload_folder=model_args.offload_folder, ) - if ( - is_trainable and adapter_to_resume is None - ): # create new lora weights while training - if ( - len(finetuning_args.lora_target) == 1 - and finetuning_args.lora_target[0] == "all" - ): + if is_trainable and adapter_to_resume is None: # create new lora weights while training + if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all": target_modules = find_all_linear_modules(model) else: target_modules = finetuning_args.lora_target if finetuning_args.use_llama_pro: - target_modules = find_expanded_modules( - model, target_modules, finetuning_args.num_layer_trainable - ) + target_modules = find_expanded_modules(model, target_modules, finetuning_args.num_layer_trainable) if ( finetuning_args.use_dora and getattr(model, "quantization_method", None) is not None - and getattr(model, "quantization_method", None) - != QuantizationMethod.BITS_AND_BYTES + and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES ): raise ValueError("DoRA is not compatible with PTQ-quantized models.") @@ -202,11 +166,7 @@ def init_adapter( module_names.add(name.split(".")[-1]) finetuning_args.additional_target = module_names - logger.warning( - "Vocab has been resized, add {} to trainable params.".format( - ",".join(module_names) - ) - ) + logger.warning("Vocab has been resized, add {} to trainable params.".format(",".join(module_names))) peft_kwargs = { "r": finetuning_args.lora_rank, @@ -233,10 +193,6 @@ def init_adapter( param.data = param.data.to(torch.float32) if model_args.adapter_name_or_path is not None: - logger.info( - "Loaded adapter(s): {}".format( - ",".join(model_args.adapter_name_or_path) - ) - ) + logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path))) return model diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index 99ad9adc..47298673 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -3,9 +3,9 @@ from typing import TYPE_CHECKING, Any, Dict, Union from transformers import ( AutoConfig, AutoModelForCausalLM, - 
AutoTokenizer, - AutoProcessor, AutoModelForVision2Seq, + AutoProcessor, + AutoTokenizer, ) from trl import AutoModelForCausalLMWithValueHead @@ -17,6 +17,7 @@ from .utils.misc import load_valuehead_params, register_autoclass from .utils.mod import convert_pretrained_model_to_mod, load_mod_pretrained_model from .utils.unsloth import load_unsloth_pretrained_model + if TYPE_CHECKING: from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer @@ -42,7 +43,7 @@ def _get_init_kwargs(model_args: "ModelArguments") -> Dict[str, Any]: def load_tokenizer( model_args: "ModelArguments", -) -> Dict[str, Union["PreTrainedTokenizer", "AutoProcesser"]]: +) -> Dict[str, Union["PreTrainedTokenizer", "AutoProcessor"]]: r""" Loads pretrained tokenizer. @@ -70,14 +71,10 @@ def load_tokenizer( dict(additional_special_tokens=model_args.new_special_tokens), replace_additional_special_tokens=False, ) - logger.info( - "Add {} to special tokens.".format(",".join(model_args.new_special_tokens)) - ) + logger.info("Add {} to special tokens.".format(",".join(model_args.new_special_tokens))) if num_added_tokens > 0 and not model_args.resize_vocab: model_args.resize_vocab = True - logger.warning( - "New tokens have been added, changed `resize_vocab` to True." - ) + logger.warning("New tokens have been added, changed `resize_vocab` to True.") patch_tokenizer(tokenizer) tokenizer_modules = {"tokenizer": tokenizer, "processor": None} @@ -174,10 +171,8 @@ def load_model( trainable_params, all_param = count_parameters(model) if is_trainable: - param_stats = ( - "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( - trainable_params, all_param, 100 * trainable_params / all_param - ) + param_stats = "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( + trainable_params, all_param, 100 * trainable_params / all_param ) else: param_stats = "all params: {:d}".format(all_param) diff --git a/src/llmtuner/train/sft/workflow.py b/src/llmtuner/train/sft/workflow.py index 6f887810..50833a99 100644 --- a/src/llmtuner/train/sft/workflow.py +++ b/src/llmtuner/train/sft/workflow.py @@ -50,29 +50,17 @@ def run_sft( tokenizer.padding_side = "left" # use left-padding in generation if getattr(model, "is_quantized", False) and not training_args.do_train: - setattr( - model, "_hf_peft_config_loaded", True - ) # hack here: make model compatible with prediction + setattr(model, "_hf_peft_config_loaded", True) # hack here: make model compatible with prediction data_collator = DataCollatorForSeq2Seq( tokenizer=tokenizer, - pad_to_multiple_of=( - 8 if tokenizer.padding_side == "right" else None - ), # for shift short attention - label_pad_token_id=( - IGNORE_INDEX - if data_args.ignore_pad_token_for_loss - else tokenizer.pad_token_id - ), + pad_to_multiple_of=(8 if tokenizer.padding_side == "right" else None), # for shift short attention + label_pad_token_id=(IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id), ) # Override the decoding parameters of Seq2SeqTrainer - training_args.generation_max_length = ( - training_args.generation_max_length or data_args.cutoff_len - ) - training_args.generation_num_beams = ( - data_args.eval_num_beams or training_args.generation_num_beams - ) + training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len + training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams if model_args.use_mllm: training_args.remove_unused_columns = False @@ -84,25 +72,19 
@@ def run_sft( tokenizer=tokenizer, data_collator=data_collator, callbacks=callbacks, - compute_metrics=( - ComputeMetrics(tokenizer) if training_args.predict_with_generate else None - ), + compute_metrics=(ComputeMetrics(tokenizer) if training_args.predict_with_generate else None), **split_dataset(dataset, data_args, training_args), ) # Keyword arguments for `model.generate` gen_kwargs = generating_args.to_dict() - gen_kwargs["eos_token_id"] = [ - tokenizer.eos_token_id - ] + tokenizer.additional_special_tokens_ids + gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids gen_kwargs["pad_token_id"] = tokenizer.pad_token_id gen_kwargs["logits_processor"] = get_logits_processor() # Training if training_args.do_train: - train_result = trainer.train( - resume_from_checkpoint=training_args.resume_from_checkpoint - ) + train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) @@ -113,27 +95,19 @@ def run_sft( # Evaluation if training_args.do_eval: metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs) - if ( - training_args.predict_with_generate - ): # eval_loss will be wrong if predict_with_generate is enabled + if training_args.predict_with_generate: # eval_loss will be wrong if predict_with_generate is enabled metrics.pop("eval_loss", None) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: - predict_results = trainer.predict( - dataset, metric_key_prefix="predict", **gen_kwargs - ) - if ( - training_args.predict_with_generate - ): # predict_loss will be wrong if predict_with_generate is enabled + predict_results = trainer.predict(dataset, metric_key_prefix="predict", **gen_kwargs) + if training_args.predict_with_generate: # predict_loss will be wrong if predict_with_generate is enabled predict_results.metrics.pop("predict_loss", None) trainer.log_metrics("predict", predict_results.metrics) trainer.save_metrics("predict", predict_results.metrics) trainer.save_predictions(predict_results) # Create model card - create_modelcard_and_push( - trainer, model_args, data_args, training_args, finetuning_args - ) + create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args) diff --git a/src/llmtuner/train/sftmm/__init__.py b/src/llmtuner/train/sftmm/__init__.py new file mode 100644 index 00000000..9ebdf821 --- /dev/null +++ b/src/llmtuner/train/sftmm/__init__.py @@ -0,0 +1,4 @@ +from .workflow import run_sft_mm + + +__all__ = ["run_sft_mm"] diff --git a/src/llmtuner/train/sftmm/metric.py b/src/llmtuner/train/sftmm/metric.py new file mode 100644 index 00000000..d1af4c17 --- /dev/null +++ b/src/llmtuner/train/sftmm/metric.py @@ -0,0 +1,61 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, Sequence, Tuple, Union + +import numpy as np + +from ...extras.constants import IGNORE_INDEX +from ...extras.packages import is_jieba_available, is_nltk_available, is_rouge_available + + +if TYPE_CHECKING: + from transformers.tokenization_utils import PreTrainedTokenizer + +if is_jieba_available(): + import jieba # type: ignore + +if is_nltk_available(): + from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu + +if is_rouge_available(): + from rouge_chinese import Rouge + + +@dataclass +class ComputeMetrics: + r""" + Wraps the tokenizer into metric functions, used in 
Seq2SeqPeftTrainer. + """ + + tokenizer: "PreTrainedTokenizer" + + def __call__(self, eval_preds: Sequence[Union[np.ndarray, Tuple[np.ndarray]]]) -> Dict[str, float]: + r""" + Uses the model predictions to compute metrics. + """ + preds, labels = eval_preds + score_dict = {"rouge-1": [], "rouge-2": [], "rouge-l": [], "bleu-4": []} + + preds = np.where(preds != IGNORE_INDEX, preds, self.tokenizer.pad_token_id) + labels = np.where(labels != IGNORE_INDEX, labels, self.tokenizer.pad_token_id) + + decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True) + decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True) + + for pred, label in zip(decoded_preds, decoded_labels): + hypothesis = list(jieba.cut(pred)) + reference = list(jieba.cut(label)) + + if len(" ".join(hypothesis).split()) == 0 or len(" ".join(reference).split()) == 0: + result = {"rouge-1": {"f": 0.0}, "rouge-2": {"f": 0.0}, "rouge-l": {"f": 0.0}} + else: + rouge = Rouge() + scores = rouge.get_scores(" ".join(hypothesis), " ".join(reference)) + result = scores[0] + + for k, v in result.items(): + score_dict[k].append(round(v["f"] * 100, 4)) + + bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3) + score_dict["bleu-4"].append(round(bleu_score * 100, 4)) + + return {k: float(np.mean(v)) for k, v in score_dict.items()} diff --git a/src/llmtuner/train/sftmm/trainer.py b/src/llmtuner/train/sftmm/trainer.py new file mode 100644 index 00000000..270e7169 --- /dev/null +++ b/src/llmtuner/train/sftmm/trainer.py @@ -0,0 +1,39 @@ +from types import MethodType +from typing import TYPE_CHECKING, Optional + +import torch +from transformers import Seq2SeqTrainer + +from ...extras.logging import get_logger +from ..utils import create_custom_optimzer, create_custom_scheduler + + +if TYPE_CHECKING: + from ...hparams import FinetuningArguments + +logger = get_logger(__name__) + + +class CustomSeq2SeqTrainer(Seq2SeqTrainer): + r""" + Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE. 
+ """ + + def __init__(self, finetuning_args: "FinetuningArguments", **kwargs) -> None: + super().__init__(**kwargs) + self.finetuning_args = finetuning_args + if finetuning_args.use_badam: + from badam import clip_grad_norm_for_sparse_tensor + + self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_for_sparse_tensor, self.accelerator) + + def create_optimizer(self) -> "torch.optim.Optimizer": + if self.optimizer is None: + self.optimizer = create_custom_optimzer(self.model, self.args, self.finetuning_args) + return super().create_optimizer() + + def create_scheduler( + self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None + ) -> "torch.optim.lr_scheduler.LRScheduler": + create_custom_scheduler(self.args, num_training_steps, optimizer) + return super().create_scheduler(num_training_steps, optimizer) diff --git a/src/llmtuner/train/sftmm/workflow.py b/src/llmtuner/train/sftmm/workflow.py new file mode 100644 index 00000000..dbda2d05 --- /dev/null +++ b/src/llmtuner/train/sftmm/workflow.py @@ -0,0 +1,101 @@ +# Inspired by: https://github.com/huggingface/transformers/blob/v4.34.1/examples/pytorch/summarization/run_summarization.py +from typing import TYPE_CHECKING, List, Optional + +from transformers import DataCollatorForSeq2Seq + +from ...data import get_dataset +from ...extras.constants import IGNORE_INDEX +from ...extras.misc import get_logits_processor +from ...extras.ploting import plot_loss +from ...model import load_model, load_processor +from ..sft.metric import ComputeMetrics +from ..utils import create_modelcard_and_push +from .trainer import CustomSeq2SeqTrainer + + +if TYPE_CHECKING: + from transformers import Seq2SeqTrainingArguments, TrainerCallback + + from ...hparams import ( + DataArguments, + FinetuningArguments, + GeneratingArguments, + ModelArguments, + ) + + +def run_sft_mm( + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + callbacks: Optional[List["TrainerCallback"]] = None, +): + processor = load_processor(model_args) + tokenizer = processor.tokenizer + dataset = get_dataset(tokenizer, model_args, data_args, training_args, "sft", processor) + model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) + if getattr(model, "is_quantized", False) and not training_args.do_train: + setattr(model, "_hf_peft_config_loaded", True) # hack here: make model compatible with prediction + train_dataset = dataset + eval_dataset = dataset + data_collator = DataCollatorForSeq2Seq( + tokenizer=tokenizer, + pad_to_multiple_of=(8 if tokenizer.padding_side == "right" else None), # for shift short attention + label_pad_token_id=(IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id), + ) + + # Override the decoding parameters of Seq2SeqTrainer + training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len + training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams + training_args.remove_unused_columns = False + + # Initialize our Trainer + trainer = CustomSeq2SeqTrainer( + model=model, + args=training_args, + finetuning_args=finetuning_args, + tokenizer=tokenizer, + data_collator=data_collator, + callbacks=callbacks, + compute_metrics=(ComputeMetrics(tokenizer) if training_args.predict_with_generate else None), + train_dataset=train_dataset, + eval_dataset=eval_dataset, + ) + + # Keyword 
arguments for `model.generate` + gen_kwargs = generating_args.to_dict() + gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids + gen_kwargs["pad_token_id"] = tokenizer.pad_token_id + gen_kwargs["logits_processor"] = get_logits_processor() + + # Training + if training_args.do_train: + train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) + trainer.save_model() + trainer.log_metrics("train", train_result.metrics) + trainer.save_metrics("train", train_result.metrics) + trainer.save_state() + if trainer.is_world_process_zero() and finetuning_args.plot_loss: + plot_loss(training_args.output_dir, keys=["loss", "eval_loss"]) + + # Evaluation + if training_args.do_eval: + metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs) + if training_args.predict_with_generate: # eval_loss will be wrong if predict_with_generate is enabled + metrics.pop("eval_loss", None) + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + # Predict + if training_args.do_predict: + predict_results = trainer.predict(dataset, metric_key_prefix="predict", **gen_kwargs) + if training_args.predict_with_generate: # predict_loss will be wrong if predict_with_generate is enabled + predict_results.metrics.pop("predict_loss", None) + trainer.log_metrics("predict", predict_results.metrics) + trainer.save_metrics("predict", predict_results.metrics) + trainer.save_predictions(predict_results) + + # Create model card + create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index 5f691225..e1999946 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -15,6 +15,7 @@ from .pt import run_pt from .rm import run_rm from .sft import run_sft + if TYPE_CHECKING: from transformers import TrainerCallback From fc0fa9f048aa7ac2f74da15c0250bae5c46cf386 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 21:27:48 +0800 Subject: [PATCH 13/34] modify style --- src/llmtuner/hparams/model_args.py | 14 +--- src/llmtuner/train/sftmm/__init__.py | 4 -- src/llmtuner/train/sftmm/metric.py | 61 ---------------- src/llmtuner/train/sftmm/trainer.py | 39 ----------- src/llmtuner/train/sftmm/workflow.py | 101 --------------------------- 5 files changed, 2 insertions(+), 217 deletions(-) delete mode 100644 src/llmtuner/train/sftmm/__init__.py delete mode 100644 src/llmtuner/train/sftmm/metric.py delete mode 100644 src/llmtuner/train/sftmm/trainer.py delete mode 100644 src/llmtuner/train/sftmm/workflow.py diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py index df1a5ec0..97b908e4 100644 --- a/src/llmtuner/hparams/model_args.py +++ b/src/llmtuner/hparams/model_args.py @@ -188,18 +188,8 @@ class ModelArguments: if self.new_special_tokens is not None: # support multiple special tokens self.new_special_tokens = [token.strip() for token in self.new_special_tokens.split(",")] - assert self.quantization_bit in [ - None, - 8, - 4, - ], "We only accept 4-bit or 8-bit quantization." - assert self.export_quantization_bit in [ - None, - 8, - 4, - 3, - 2, - ], "We only accept 2/3/4/8-bit quantization." + assert self.quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization." + assert self.export_quantization_bit in [None, 8, 4, 3, 2], "We only accept 2/3/4/8-bit quantization." 
if self.export_quantization_bit is not None and self.export_quantization_dataset is None: raise ValueError("Quantization dataset is necessary for exporting.") diff --git a/src/llmtuner/train/sftmm/__init__.py b/src/llmtuner/train/sftmm/__init__.py deleted file mode 100644 index 9ebdf821..00000000 --- a/src/llmtuner/train/sftmm/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .workflow import run_sft_mm - - -__all__ = ["run_sft_mm"] diff --git a/src/llmtuner/train/sftmm/metric.py b/src/llmtuner/train/sftmm/metric.py deleted file mode 100644 index d1af4c17..00000000 --- a/src/llmtuner/train/sftmm/metric.py +++ /dev/null @@ -1,61 +0,0 @@ -from dataclasses import dataclass -from typing import TYPE_CHECKING, Dict, Sequence, Tuple, Union - -import numpy as np - -from ...extras.constants import IGNORE_INDEX -from ...extras.packages import is_jieba_available, is_nltk_available, is_rouge_available - - -if TYPE_CHECKING: - from transformers.tokenization_utils import PreTrainedTokenizer - -if is_jieba_available(): - import jieba # type: ignore - -if is_nltk_available(): - from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu - -if is_rouge_available(): - from rouge_chinese import Rouge - - -@dataclass -class ComputeMetrics: - r""" - Wraps the tokenizer into metric functions, used in Seq2SeqPeftTrainer. - """ - - tokenizer: "PreTrainedTokenizer" - - def __call__(self, eval_preds: Sequence[Union[np.ndarray, Tuple[np.ndarray]]]) -> Dict[str, float]: - r""" - Uses the model predictions to compute metrics. - """ - preds, labels = eval_preds - score_dict = {"rouge-1": [], "rouge-2": [], "rouge-l": [], "bleu-4": []} - - preds = np.where(preds != IGNORE_INDEX, preds, self.tokenizer.pad_token_id) - labels = np.where(labels != IGNORE_INDEX, labels, self.tokenizer.pad_token_id) - - decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True) - decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True) - - for pred, label in zip(decoded_preds, decoded_labels): - hypothesis = list(jieba.cut(pred)) - reference = list(jieba.cut(label)) - - if len(" ".join(hypothesis).split()) == 0 or len(" ".join(reference).split()) == 0: - result = {"rouge-1": {"f": 0.0}, "rouge-2": {"f": 0.0}, "rouge-l": {"f": 0.0}} - else: - rouge = Rouge() - scores = rouge.get_scores(" ".join(hypothesis), " ".join(reference)) - result = scores[0] - - for k, v in result.items(): - score_dict[k].append(round(v["f"] * 100, 4)) - - bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3) - score_dict["bleu-4"].append(round(bleu_score * 100, 4)) - - return {k: float(np.mean(v)) for k, v in score_dict.items()} diff --git a/src/llmtuner/train/sftmm/trainer.py b/src/llmtuner/train/sftmm/trainer.py deleted file mode 100644 index 270e7169..00000000 --- a/src/llmtuner/train/sftmm/trainer.py +++ /dev/null @@ -1,39 +0,0 @@ -from types import MethodType -from typing import TYPE_CHECKING, Optional - -import torch -from transformers import Seq2SeqTrainer - -from ...extras.logging import get_logger -from ..utils import create_custom_optimzer, create_custom_scheduler - - -if TYPE_CHECKING: - from ...hparams import FinetuningArguments - -logger = get_logger(__name__) - - -class CustomSeq2SeqTrainer(Seq2SeqTrainer): - r""" - Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE. 
- """ - - def __init__(self, finetuning_args: "FinetuningArguments", **kwargs) -> None: - super().__init__(**kwargs) - self.finetuning_args = finetuning_args - if finetuning_args.use_badam: - from badam import clip_grad_norm_for_sparse_tensor - - self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_for_sparse_tensor, self.accelerator) - - def create_optimizer(self) -> "torch.optim.Optimizer": - if self.optimizer is None: - self.optimizer = create_custom_optimzer(self.model, self.args, self.finetuning_args) - return super().create_optimizer() - - def create_scheduler( - self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None - ) -> "torch.optim.lr_scheduler.LRScheduler": - create_custom_scheduler(self.args, num_training_steps, optimizer) - return super().create_scheduler(num_training_steps, optimizer) diff --git a/src/llmtuner/train/sftmm/workflow.py b/src/llmtuner/train/sftmm/workflow.py deleted file mode 100644 index dbda2d05..00000000 --- a/src/llmtuner/train/sftmm/workflow.py +++ /dev/null @@ -1,101 +0,0 @@ -# Inspired by: https://github.com/huggingface/transformers/blob/v4.34.1/examples/pytorch/summarization/run_summarization.py -from typing import TYPE_CHECKING, List, Optional - -from transformers import DataCollatorForSeq2Seq - -from ...data import get_dataset -from ...extras.constants import IGNORE_INDEX -from ...extras.misc import get_logits_processor -from ...extras.ploting import plot_loss -from ...model import load_model, load_processor -from ..sft.metric import ComputeMetrics -from ..utils import create_modelcard_and_push -from .trainer import CustomSeq2SeqTrainer - - -if TYPE_CHECKING: - from transformers import Seq2SeqTrainingArguments, TrainerCallback - - from ...hparams import ( - DataArguments, - FinetuningArguments, - GeneratingArguments, - ModelArguments, - ) - - -def run_sft_mm( - model_args: "ModelArguments", - data_args: "DataArguments", - training_args: "Seq2SeqTrainingArguments", - finetuning_args: "FinetuningArguments", - generating_args: "GeneratingArguments", - callbacks: Optional[List["TrainerCallback"]] = None, -): - processor = load_processor(model_args) - tokenizer = processor.tokenizer - dataset = get_dataset(tokenizer, model_args, data_args, training_args, "sft", processor) - model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) - if getattr(model, "is_quantized", False) and not training_args.do_train: - setattr(model, "_hf_peft_config_loaded", True) # hack here: make model compatible with prediction - train_dataset = dataset - eval_dataset = dataset - data_collator = DataCollatorForSeq2Seq( - tokenizer=tokenizer, - pad_to_multiple_of=(8 if tokenizer.padding_side == "right" else None), # for shift short attention - label_pad_token_id=(IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id), - ) - - # Override the decoding parameters of Seq2SeqTrainer - training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len - training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams - training_args.remove_unused_columns = False - - # Initialize our Trainer - trainer = CustomSeq2SeqTrainer( - model=model, - args=training_args, - finetuning_args=finetuning_args, - tokenizer=tokenizer, - data_collator=data_collator, - callbacks=callbacks, - compute_metrics=(ComputeMetrics(tokenizer) if training_args.predict_with_generate else None), - train_dataset=train_dataset, - eval_dataset=eval_dataset, - ) - - # 
Keyword arguments for `model.generate` - gen_kwargs = generating_args.to_dict() - gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids - gen_kwargs["pad_token_id"] = tokenizer.pad_token_id - gen_kwargs["logits_processor"] = get_logits_processor() - - # Training - if training_args.do_train: - train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) - trainer.save_model() - trainer.log_metrics("train", train_result.metrics) - trainer.save_metrics("train", train_result.metrics) - trainer.save_state() - if trainer.is_world_process_zero() and finetuning_args.plot_loss: - plot_loss(training_args.output_dir, keys=["loss", "eval_loss"]) - - # Evaluation - if training_args.do_eval: - metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs) - if training_args.predict_with_generate: # eval_loss will be wrong if predict_with_generate is enabled - metrics.pop("eval_loss", None) - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - # Predict - if training_args.do_predict: - predict_results = trainer.predict(dataset, metric_key_prefix="predict", **gen_kwargs) - if training_args.predict_with_generate: # predict_loss will be wrong if predict_with_generate is enabled - predict_results.metrics.pop("predict_loss", None) - trainer.log_metrics("predict", predict_results.metrics) - trainer.save_metrics("predict", predict_results.metrics) - trainer.save_predictions(predict_results) - - # Create model card - create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args) From 235b4113709fe788b4f1a1a3089ce8356940877b Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 21:29:50 +0800 Subject: [PATCH 14/34] modify style --- src/llmtuner/model/adapter.py | 6 +++--- src/llmtuner/model/loader.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py index f3db4d1e..d43e00f0 100644 --- a/src/llmtuner/model/adapter.py +++ b/src/llmtuner/model/adapter.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Union +from typing import TYPE_CHECKING import torch from peft import LoraConfig, LoraModel, PeftModel, TaskType, get_peft_model @@ -21,11 +21,11 @@ logger = get_logger(__name__) def init_adapter( config: "PretrainedConfig", - model: Union["PreTrainedModel"], + model: "PreTrainedModel", model_args: "ModelArguments", finetuning_args: "FinetuningArguments", is_trainable: bool, -) -> Union["PreTrainedModel"]: +) -> "PreTrainedModel": r""" Initializes the adapters. diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index 47298673..dd7eb44c 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -112,7 +112,7 @@ def load_model( finetuning_args: "FinetuningArguments", is_trainable: bool = False, add_valuehead: bool = False, -) -> Union["PreTrainedModel"]: +) -> "PreTrainedModel": r""" Loads pretrained model. 
""" From 2cab2d42fb37454760bdc1b45ceae7844871e29b Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 21:32:01 +0800 Subject: [PATCH 15/34] make dataset script --- scripts/make_mllm_instruct.py | 95 ----------------------------------- 1 file changed, 95 deletions(-) delete mode 100644 scripts/make_mllm_instruct.py diff --git a/scripts/make_mllm_instruct.py b/scripts/make_mllm_instruct.py deleted file mode 100644 index 41e13b8e..00000000 --- a/scripts/make_mllm_instruct.py +++ /dev/null @@ -1,95 +0,0 @@ -import json -import os.path - -import fire -from datasets import Dataset, concatenate_datasets, load_dataset, Value, Image, Features, Sequence - -"""usage -python3 scripts/make_mllm_instruct.py \ ---json_path data/llava_instruct_example.json \ ---image_path data/images \ ---output_path data/mllm_example_dataset -""" - - -def make_one_json(json_path, image_path) -> Dataset: - with open(json_path) as f: - raw_data_ls = json.loads(f.read()) - data_ls = [] - for i, data in enumerate(raw_data_ls): - for j, message in enumerate(data['messages']): - text = message['content'] - message['content'] = [{'index': None, 'text': text, 'type': 'text'}] - if j == 0: - message['content'].append({'index': 0, 'text': None, 'type': 'image'}) - image = data['image'] - if image_path: - image = os.path.join(image_path, data['image']) - data['images'] = [image] - del data['image'] - data_ls.append(data) - - def gen(): - for data in data_ls: - yield data - - features = Features({'messages': [{'content': [ - {'index': Value(dtype='int64', id=None), 'text': Value(dtype='string', id=None), - 'type': Value(dtype='string', id=None)}], 'role': Value(dtype='string', id=None)}], - 'images': Sequence(feature=Image(decode=True, id=None), length=-1, id=None)}) - dataset = Dataset.from_generator(gen, features=features) - return dataset - - -yaml_content = """--- -dataset_info: - features: - - name: messages - list: - - name: content - list: - - name: index - dtype: int64 - - name: text - dtype: string - - name: type - dtype: string - - name: role - dtype: string - - name: images - sequence: image -configs: -- config_name: default - data_files: - - split: train - path: data/train-* - - split: test - path: data/test-* ----""" - - -def main( - json_path: str, - image_path: str, - output_path: str, -): - json_path_list = json_path.split() - dataset_list = [] - for json_path in json_path_list: - dataset = make_one_json(json_path, image_path) - dataset_list.append(dataset) - dataset = concatenate_datasets(dataset_list) - print(dataset[0]) - data_path = os.path.join(output_path, "data") - os.makedirs(os.path.join(data_path), exist_ok=True) - parquet_path = os.path.join(data_path, "train-0.parquet") - dataset.to_parquet(parquet_path) - parquet_path = os.path.join(data_path, "test-0.parquet") - dataset.to_parquet(parquet_path) - readme_path = os.path.join(output_path, "README.md") - with open(readme_path, 'w') as f: - f.write(yaml_content) - - -if __name__ == '__main__': - fire.Fire(main) From 2d4ded535faa44b460f88028d49e4b8c8b430db5 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 21:58:18 +0800 Subject: [PATCH 16/34] modify some style --- src/llmtuner/data/aligner.py | 24 ++----- src/llmtuner/data/preprocess.py | 43 +++--------- src/llmtuner/data/template.py | 91 ++----------------------- src/llmtuner/hparams/finetuning_args.py | 2 +- src/llmtuner/model/loader.py | 4 +- src/llmtuner/train/sft/workflow.py | 20 ++---- 6 files changed, 26 insertions(+), 158 
deletions(-) diff --git a/src/llmtuner/data/aligner.py b/src/llmtuner/data/aligner.py index 9d440aff..17b9fc6d 100644 --- a/src/llmtuner/data/aligner.py +++ b/src/llmtuner/data/aligner.py @@ -82,10 +82,7 @@ def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" raise ValueError("Invalid role tag in {}.".format(messages)) aligned_messages.append( - { - "role": tag_mapping[message[dataset_attr.role_tag]], - "content": message[dataset_attr.content_tag], - } + {"role": tag_mapping[message[dataset_attr.role_tag]], "content": message[dataset_attr.content_tag]} ) outputs["prompt"].append(aligned_messages[:-1]) @@ -126,10 +123,7 @@ def convert_llava(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") - raise ValueError("Invalid role tag in {}.".format(messages)) aligned_messages.append( - { - "role": tag_mapping[message[dataset_attr.role_tag]], - "content": message[dataset_attr.content_tag], - } + {"role": tag_mapping[message[dataset_attr.role_tag]], "content": message[dataset_attr.content_tag]} ) outputs["prompt"].append(aligned_messages[:-1]) @@ -143,9 +137,7 @@ def convert_llava(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") - def align_dataset( - dataset: Union["Dataset", "IterableDataset"], - dataset_attr: "DatasetAttr", - data_args: "DataArguments", + dataset: Union["Dataset", "IterableDataset"], dataset_attr: "DatasetAttr", data_args: "DataArguments" ) -> Union["Dataset", "IterableDataset"]: r""" Aligned dataset: @@ -165,16 +157,10 @@ def align_dataset( features = Features.from_dict( { "prompt": [ - { - "role": {"dtype": "string", "_type": "Value"}, - "content": {"dtype": "string", "_type": "Value"}, - } + {"role": {"dtype": "string", "_type": "Value"}, "content": {"dtype": "string", "_type": "Value"}} ], "response": [ - { - "role": {"dtype": "string", "_type": "Value"}, - "content": {"dtype": "string", "_type": "Value"}, - } + {"role": {"dtype": "string", "_type": "Value"}, "content": {"dtype": "string", "_type": "Value"}} ], "system": {"dtype": "string", "_type": "Value"}, "tools": {"dtype": "string", "_type": "Value"}, diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index 1c8c64a6..51af8060 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -14,14 +14,11 @@ if TYPE_CHECKING: from ..hparams import DataArguments from .template import Template - logger = get_logger(__name__) def preprocess_pretrain_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - data_args: "DataArguments", + examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", data_args: "DataArguments" ) -> Dict[str, List[List[int]]]: # build grouped texts with format `X1 X2 X3 ...` if packing is enabled text_examples = [messages[0]["content"] + tokenizer.eos_token for messages in examples["prompt"]] @@ -56,11 +53,7 @@ def preprocess_supervised_dataset( ) -> Dict[str, List[List[int]]]: # build inputs with format ` X Y ` and labels with format ` ... Y ` # for multiturn examples, we only mask the prompt part in each prompt-response pair. - model_inputs = { - "input_ids": [], - "attention_mask": [], - "labels": [], - } + model_inputs = {"input_ids": [], "attention_mask": [], "labels": []} for i in range(len(examples["prompt"])): if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1: @@ -154,12 +147,7 @@ def preprocess_multimodal_supervised_dataset( # build inputs with format ` X Y ` and labels with format ` ... 
Y ` # for multiturn examples, we only mask the prompt part in each prompt-response pair. tokenizer = processor.tokenizer - model_inputs = { - "input_ids": [], - "attention_mask": [], - "labels": [], - "pixel_values": [], - } + model_inputs = {"input_ids": [], "attention_mask": [], "labels": [], "pixel_values": []} for i in range(len(examples["prompt"])): if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1: @@ -284,10 +272,7 @@ def print_supervised_dataset_example(example: Dict[str, List[int]], tokenizer: " print("label_ids:\n{}".format(example["labels"])) print( "labels:\n{}".format( - tokenizer.decode( - list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), - skip_special_tokens=False, - ) + tokenizer.decode(list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), skip_special_tokens=False) ) ) @@ -320,33 +305,21 @@ def get_preprocess_and_print_func( elif stage == "sft" and not training_args.predict_with_generate: if data_args.packing: preprocess_func = partial( - preprocess_packed_supervised_dataset, - tokenizer=tokenizer, - template=template, - data_args=data_args, + preprocess_packed_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args ) elif processor is not None: preprocess_func = partial( - preprocess_multimodal_supervised_dataset, - processor=processor, - template=template, - data_args=data_args, + preprocess_multimodal_supervised_dataset, processor=processor, template=template, data_args=data_args ) else: preprocess_func = partial( - preprocess_supervised_dataset, - tokenizer=tokenizer, - template=template, - data_args=data_args, + preprocess_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args ) print_function = partial(print_supervised_dataset_example, tokenizer=tokenizer) elif stage == "rm": preprocess_func = partial( - preprocess_pairwise_dataset, - tokenizer=tokenizer, - template=template, - data_args=data_args, + preprocess_pairwise_dataset, tokenizer=tokenizer, template=template, data_args=data_args ) print_function = partial(print_pairwise_dataset_example, tokenizer=tokenizer) else: diff --git a/src/llmtuner/data/template.py b/src/llmtuner/data/template.py index cf21e932..f798ba5a 100644 --- a/src/llmtuner/data/template.py +++ b/src/llmtuner/data/template.py @@ -11,7 +11,6 @@ if TYPE_CHECKING: from .formatter import SLOTS, Formatter - logger = get_logger(__name__) @@ -368,8 +367,7 @@ def get_template_and_fix_tokenizer( if stop_words: num_added_tokens = tokenizer.add_special_tokens( - dict(additional_special_tokens=stop_words), - replace_additional_special_tokens=False, + dict(additional_special_tokens=stop_words), replace_additional_special_tokens=False ) logger.info("Add {} to stop words.".format(",".join(stop_words))) if num_added_tokens > 0: @@ -393,7 +391,6 @@ _register_template( ), ) - _register_template( name="aquila", format_user=StringFormatter(slots=["Human: {{content}}###Assistant:"]), @@ -406,36 +403,26 @@ _register_template( efficient_eos=True, ) - _register_template( name="atom", format_user=StringFormatter( - slots=[ - {"bos_token"}, - "Human: {{content}}\n", - {"eos_token"}, - {"bos_token"}, - "Assistant:", - ] + slots=[{"bos_token"}, "Human: {{content}}\n", {"eos_token"}, {"bos_token"}, "Assistant:"] ), format_assistant=StringFormatter(slots=["{{content}}\n", {"eos_token"}]), ) - _register_template( name="baichuan", format_user=StringFormatter(slots=[{"token": ""}, "{{content}}", {"token": ""}]), efficient_eos=True, ) - _register_template( name="baichuan2", 
format_user=StringFormatter(slots=["{{content}}"]), efficient_eos=True, ) - _register_template( name="belle", format_user=StringFormatter(slots=["Human: {{content}}\n\nBelle: "]), @@ -444,13 +431,11 @@ _register_template( force_system=True, ) - _register_template( name="bluelm", format_user=StringFormatter(slots=[{"token": "[|Human|]:"}, "{{content}}", {"token": "[|AI|]:"}]), ) - _register_template( name="breeze", format_user=StringFormatter(slots=["[INST] {{content}} [/INST] "]), @@ -462,7 +447,6 @@ _register_template( efficient_eos=True, ) - _register_template( name="chatglm2", format_user=StringFormatter(slots=["[Round {{idx}}]\n\n问:{{content}}\n\n答:"]), @@ -472,7 +456,6 @@ _register_template( force_system=True, ) - _register_template( name="chatglm3", format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), @@ -480,40 +463,23 @@ _register_template( format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), format_function=FunctionFormatter(slots=["{{name}}\n{{arguments}}"]), format_observation=StringFormatter( - slots=[ - {"token": "<|observation|>"}, - "\n", - "{{content}}", - {"token": "<|assistant|>"}, - ] + slots=[{"token": "<|observation|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] ), stop_words=["<|user|>", "<|observation|>"], efficient_eos=True, force_system=True, ) - _register_template( name="chatglm3_system", format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), format_assistant=StringFormatter(slots=["\n", "{{content}}"]), format_system=StringFormatter( - slots=[ - {"token": "[gMASK]"}, - {"token": "sop"}, - {"token": "<|system|>"}, - "\n", - "{{content}}", - ] + slots=[{"token": "[gMASK]"}, {"token": "sop"}, {"token": "<|system|>"}, "\n", "{{content}}"] ), format_function=FunctionFormatter(slots=["{{name}}\n{{arguments}}"]), format_observation=StringFormatter( - slots=[ - {"token": "<|observation|>"}, - "\n", - "{{content}}", - {"token": "<|assistant|>"}, - ] + slots=[{"token": "<|observation|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] ), default_system=( "You are ChatGLM3, a large language model trained by Zhipu.AI. 
" @@ -523,7 +489,6 @@ _register_template( efficient_eos=True, ) - _register_template( name="chatml", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -534,7 +499,6 @@ _register_template( replace_eos=True, ) - _register_template( name="chatml_de", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -546,14 +510,12 @@ _register_template( replace_eos=True, ) - _register_template( name="codegeex2", format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), force_system=True, ) - _register_template( name="cohere", format_user=StringFormatter( @@ -568,7 +530,6 @@ _register_template( force_system=True, ) - _register_template( name="cpm", format_user=StringFormatter(slots=["<用户>{{content}}"]), @@ -576,7 +537,6 @@ _register_template( force_system=True, ) - _register_template( name="dbrx", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -602,7 +562,6 @@ _register_template( replace_eos=True, ) - _register_template( name="deepseek", format_user=StringFormatter(slots=["User: {{content}}\n\nAssistant:"]), @@ -610,7 +569,6 @@ _register_template( force_system=True, ) - _register_template( name="deepseekcoder", format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n### Response:"]), @@ -626,7 +584,6 @@ _register_template( efficient_eos=True, ) - _register_template( name="default", format_user=StringFormatter(slots=["Human: {{content}}\nAssistant: "]), @@ -634,14 +591,12 @@ _register_template( format_separator=EmptyFormatter(slots=["\n"]), ) - _register_template( name="empty", format_user=StringFormatter(slots=["{{content}}"]), format_assistant=StringFormatter(slots=["{{content}}"]), ) - _register_template( name="falcon", format_user=StringFormatter(slots=["User: {{content}}\nFalcon:"]), @@ -649,14 +604,12 @@ _register_template( efficient_eos=True, ) - _register_template( name="fewshot", format_separator=EmptyFormatter(slots=["\n\n"]), efficient_eos=True, ) - _register_template( name="gemma", format_user=StringFormatter(slots=["user\n{{content}}\nmodel\n"]), @@ -669,7 +622,6 @@ _register_template( force_system=True, ) - _register_template( name="intern", format_user=StringFormatter(slots=["<|User|>:{{content}}", {"token": ""}, "\n<|Bot|>:"]), @@ -678,7 +630,6 @@ _register_template( efficient_eos=True, ) - _register_template( name="intern2", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -695,7 +646,6 @@ _register_template( efficient_eos=True, # internlm2 tokenizer cannot set eos_token_id ) - _register_template( name="llama2", format_user=StringFormatter(slots=[{"bos_token"}, "[INST] {{content}} [/INST]"]), @@ -712,7 +662,6 @@ _register_template( ), ) - _register_template( name="llama2_zh", format_user=StringFormatter(slots=[{"bos_token"}, "[INST] {{content}} [/INST]"]), @@ -720,7 +669,6 @@ _register_template( default_system="You are a helpful assistant. 
你是一个乐于助人的助手。", ) - _register_template( name="llama3", format_user=StringFormatter( @@ -732,10 +680,7 @@ _register_template( ] ), format_system=StringFormatter( - slots=[ - {"bos_token"}, - "<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>", - ] + slots=[{"bos_token"}, "<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>"] ), format_observation=StringFormatter( slots=[ @@ -750,7 +695,6 @@ _register_template( replace_eos=True, ) - _register_template( name="mistral", format_user=StringFormatter(slots=[" [INST] {{content}} [/INST]"]), @@ -758,7 +702,6 @@ _register_template( force_system=True, ) - _register_template( name="olmo", format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]), @@ -767,22 +710,14 @@ _register_template( force_system=True, ) - _register_template( name="openchat", - format_user=StringFormatter( - slots=[ - "GPT4 Correct User: {{content}}", - {"eos_token"}, - "GPT4 Correct Assistant:", - ] - ), + format_user=StringFormatter(slots=["GPT4 Correct User: {{content}}", {"eos_token"}, "GPT4 Correct Assistant:"]), format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}]), format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]), force_system=True, ) - _register_template( name="orion", format_user=StringFormatter(slots=["Human: {{content}}\n\nAssistant: ", {"eos_token"}]), @@ -790,7 +725,6 @@ _register_template( force_system=True, ) - _register_template( name="phi", format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>\n"]), @@ -802,7 +736,6 @@ _register_template( replace_eos=True, ) - _register_template( name="qwen", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -814,7 +747,6 @@ _register_template( replace_eos=True, ) - _register_template( name="solar", format_user=StringFormatter(slots=["### User:\n{{content}}\n\n### Assistant:\n"]), @@ -822,7 +754,6 @@ _register_template( efficient_eos=True, ) - _register_template( name="starchat", format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>"]), @@ -833,7 +764,6 @@ _register_template( force_system=True, ) - _register_template( name="vicuna", format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]), @@ -843,7 +773,6 @@ _register_template( ), ) - _register_template( name="xuanyuan", format_user=StringFormatter(slots=["Human: {{content}} Assistant:"]), @@ -854,13 +783,11 @@ _register_template( ), ) - _register_template( name="xverse", format_user=StringFormatter(slots=["Human: {{content}}\n\nAssistant: "]), ) - _register_template( name="yayi", format_user=StringFormatter(slots=[{"token": "<|Human|>"}, ":\n{{content}}\n\n", {"token": "<|YaYi|>"}, ":"]), @@ -880,7 +807,6 @@ _register_template( stop_words=["<|End|>"], ) - _register_template( name="yi", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -889,7 +815,6 @@ _register_template( replace_eos=True, ) - _register_template( name="yuan", format_user=StringFormatter(slots=["{{content}}", {"token": ""}]), @@ -898,7 +823,6 @@ _register_template( replace_eos=True, ) - _register_template( name="zephyr", format_user=StringFormatter(slots=["<|user|>\n{{content}}", {"eos_token"}, "<|assistant|>"]), @@ -907,7 +831,6 @@ _register_template( default_system="You are a friendly chatbot who always responds in the style of a pirate", ) - _register_template( name="ziya", format_user=StringFormatter(slots=[":{{content}}\n:"]), diff --git 
a/src/llmtuner/hparams/finetuning_args.py b/src/llmtuner/hparams/finetuning_args.py index cb525699..f4f71bc5 100644 --- a/src/llmtuner/hparams/finetuning_args.py +++ b/src/llmtuner/hparams/finetuning_args.py @@ -260,7 +260,7 @@ class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments, GaloreA default=False, metadata={"help": "Whether or not to train model in purely bf16 precision (without AMP)."}, ) - stage: Literal["pt", "sft", "rm", "ppo", "dpo", "orpo", "sft_mm"] = field( + stage: Literal["pt", "sft", "rm", "ppo", "dpo", "orpo"] = field( default="sft", metadata={"help": "Which stage will be performed in training."}, ) diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index dd7eb44c..5b5c0a4d 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -41,9 +41,7 @@ def _get_init_kwargs(model_args: "ModelArguments") -> Dict[str, Any]: } -def load_tokenizer( - model_args: "ModelArguments", -) -> Dict[str, Union["PreTrainedTokenizer", "AutoProcessor"]]: +def load_tokenizer(model_args: "ModelArguments") -> Dict[str, Union["PreTrainedTokenizer", "AutoProcessor"]]: r""" Loads pretrained tokenizer. diff --git a/src/llmtuner/train/sft/workflow.py b/src/llmtuner/train/sft/workflow.py index 50833a99..205142e5 100644 --- a/src/llmtuner/train/sft/workflow.py +++ b/src/llmtuner/train/sft/workflow.py @@ -17,12 +17,7 @@ from .trainer import CustomSeq2SeqTrainer if TYPE_CHECKING: from transformers import Seq2SeqTrainingArguments, TrainerCallback - from ...hparams import ( - DataArguments, - FinetuningArguments, - GeneratingArguments, - ModelArguments, - ) + from ...hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments def run_sft( @@ -36,14 +31,7 @@ def run_sft( tokenizer_modules = load_tokenizer(model_args) tokenizer = tokenizer_modules["tokenizer"] processor = tokenizer_modules["processor"] - dataset = get_dataset( - tokenizer, - model_args, - data_args, - training_args, - stage="sft", - processor=processor, - ) + dataset = get_dataset(tokenizer, model_args, data_args, training_args, stage="sft", processor=processor) model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) if training_args.predict_with_generate: @@ -54,7 +42,7 @@ def run_sft( data_collator = DataCollatorForSeq2Seq( tokenizer=tokenizer, - pad_to_multiple_of=(8 if tokenizer.padding_side == "right" else None), # for shift short attention + pad_to_multiple_of=8 if tokenizer.padding_side == "right" else None, # for shift short attention label_pad_token_id=(IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id), ) @@ -72,7 +60,7 @@ def run_sft( tokenizer=tokenizer, data_collator=data_collator, callbacks=callbacks, - compute_metrics=(ComputeMetrics(tokenizer) if training_args.predict_with_generate else None), + compute_metrics=ComputeMetrics(tokenizer) if training_args.predict_with_generate else None, **split_dataset(dataset, data_args, training_args), ) From c27f7fbf62b4a00b5794bb20621a4060a82490b7 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 22:04:09 +0800 Subject: [PATCH 17/34] modify some style --- src/llmtuner/data/preprocess.py | 5 +-- src/llmtuner/data/template.py | 50 ++++++++++++++++++++++++++++-- src/llmtuner/train/sft/workflow.py | 2 +- src/llmtuner/train/tuner.py | 1 + 4 files changed, 50 insertions(+), 8 deletions(-) diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index 51af8060..9cdcdfa2 100644 --- 
a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -324,10 +324,7 @@ def get_preprocess_and_print_func( print_function = partial(print_pairwise_dataset_example, tokenizer=tokenizer) else: preprocess_func = partial( - preprocess_unsupervised_dataset, - tokenizer=tokenizer, - template=template, - data_args=data_args, + preprocess_unsupervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args ) print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) diff --git a/src/llmtuner/data/template.py b/src/llmtuner/data/template.py index f798ba5a..9a3673c3 100644 --- a/src/llmtuner/data/template.py +++ b/src/llmtuner/data/template.py @@ -11,6 +11,7 @@ if TYPE_CHECKING: from .formatter import SLOTS, Formatter + logger = get_logger(__name__) @@ -103,9 +104,7 @@ class Template: return self._make_pairs(encoded_messages, cutoff_len, reserved_label_len) def _convert_elements_to_ids( - self, - tokenizer: "PreTrainedTokenizer", - elements: List[Union[str, Dict[str, str]]], + self, tokenizer: "PreTrainedTokenizer", elements: List[Union[str, Dict[str, str]]] ) -> List[int]: r""" Converts elements to token ids. @@ -391,6 +390,7 @@ _register_template( ), ) + _register_template( name="aquila", format_user=StringFormatter(slots=["Human: {{content}}###Assistant:"]), @@ -403,6 +403,7 @@ _register_template( efficient_eos=True, ) + _register_template( name="atom", format_user=StringFormatter( @@ -411,18 +412,21 @@ _register_template( format_assistant=StringFormatter(slots=["{{content}}\n", {"eos_token"}]), ) + _register_template( name="baichuan", format_user=StringFormatter(slots=[{"token": ""}, "{{content}}", {"token": ""}]), efficient_eos=True, ) + _register_template( name="baichuan2", format_user=StringFormatter(slots=["{{content}}"]), efficient_eos=True, ) + _register_template( name="belle", format_user=StringFormatter(slots=["Human: {{content}}\n\nBelle: "]), @@ -431,11 +435,13 @@ _register_template( force_system=True, ) + _register_template( name="bluelm", format_user=StringFormatter(slots=[{"token": "[|Human|]:"}, "{{content}}", {"token": "[|AI|]:"}]), ) + _register_template( name="breeze", format_user=StringFormatter(slots=["[INST] {{content}} [/INST] "]), @@ -447,6 +453,7 @@ _register_template( efficient_eos=True, ) + _register_template( name="chatglm2", format_user=StringFormatter(slots=["[Round {{idx}}]\n\n问:{{content}}\n\n答:"]), @@ -456,6 +463,7 @@ _register_template( force_system=True, ) + _register_template( name="chatglm3", format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), @@ -470,6 +478,7 @@ _register_template( force_system=True, ) + _register_template( name="chatglm3_system", format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), @@ -489,6 +498,7 @@ _register_template( efficient_eos=True, ) + _register_template( name="chatml", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -499,6 +509,7 @@ _register_template( replace_eos=True, ) + _register_template( name="chatml_de", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -510,12 +521,14 @@ _register_template( replace_eos=True, ) + _register_template( name="codegeex2", format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]), force_system=True, ) + _register_template( name="cohere", format_user=StringFormatter( @@ -530,6 
+543,7 @@ _register_template( force_system=True, ) + _register_template( name="cpm", format_user=StringFormatter(slots=["<用户>{{content}}"]), @@ -537,6 +551,7 @@ _register_template( force_system=True, ) + _register_template( name="dbrx", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -562,6 +577,7 @@ _register_template( replace_eos=True, ) + _register_template( name="deepseek", format_user=StringFormatter(slots=["User: {{content}}\n\nAssistant:"]), @@ -569,6 +585,7 @@ _register_template( force_system=True, ) + _register_template( name="deepseekcoder", format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n### Response:"]), @@ -584,6 +601,7 @@ _register_template( efficient_eos=True, ) + _register_template( name="default", format_user=StringFormatter(slots=["Human: {{content}}\nAssistant: "]), @@ -591,12 +609,14 @@ _register_template( format_separator=EmptyFormatter(slots=["\n"]), ) + _register_template( name="empty", format_user=StringFormatter(slots=["{{content}}"]), format_assistant=StringFormatter(slots=["{{content}}"]), ) + _register_template( name="falcon", format_user=StringFormatter(slots=["User: {{content}}\nFalcon:"]), @@ -604,12 +624,14 @@ _register_template( efficient_eos=True, ) + _register_template( name="fewshot", format_separator=EmptyFormatter(slots=["\n\n"]), efficient_eos=True, ) + _register_template( name="gemma", format_user=StringFormatter(slots=["user\n{{content}}\nmodel\n"]), @@ -622,6 +644,7 @@ _register_template( force_system=True, ) + _register_template( name="intern", format_user=StringFormatter(slots=["<|User|>:{{content}}", {"token": ""}, "\n<|Bot|>:"]), @@ -630,6 +653,7 @@ _register_template( efficient_eos=True, ) + _register_template( name="intern2", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -646,6 +670,7 @@ _register_template( efficient_eos=True, # internlm2 tokenizer cannot set eos_token_id ) + _register_template( name="llama2", format_user=StringFormatter(slots=[{"bos_token"}, "[INST] {{content}} [/INST]"]), @@ -662,6 +687,7 @@ _register_template( ), ) + _register_template( name="llama2_zh", format_user=StringFormatter(slots=[{"bos_token"}, "[INST] {{content}} [/INST]"]), @@ -669,6 +695,7 @@ _register_template( default_system="You are a helpful assistant. 
你是一个乐于助人的助手。", ) + _register_template( name="llama3", format_user=StringFormatter( @@ -695,6 +722,7 @@ _register_template( replace_eos=True, ) + _register_template( name="mistral", format_user=StringFormatter(slots=[" [INST] {{content}} [/INST]"]), @@ -702,6 +730,7 @@ _register_template( force_system=True, ) + _register_template( name="olmo", format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]), @@ -710,6 +739,7 @@ _register_template( force_system=True, ) + _register_template( name="openchat", format_user=StringFormatter(slots=["GPT4 Correct User: {{content}}", {"eos_token"}, "GPT4 Correct Assistant:"]), @@ -718,6 +748,7 @@ _register_template( force_system=True, ) + _register_template( name="orion", format_user=StringFormatter(slots=["Human: {{content}}\n\nAssistant: ", {"eos_token"}]), @@ -725,6 +756,7 @@ _register_template( force_system=True, ) + _register_template( name="phi", format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>\n"]), @@ -736,6 +768,7 @@ _register_template( replace_eos=True, ) + _register_template( name="qwen", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -747,6 +780,7 @@ _register_template( replace_eos=True, ) + _register_template( name="solar", format_user=StringFormatter(slots=["### User:\n{{content}}\n\n### Assistant:\n"]), @@ -754,6 +788,7 @@ _register_template( efficient_eos=True, ) + _register_template( name="starchat", format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>"]), @@ -764,6 +799,7 @@ _register_template( force_system=True, ) + _register_template( name="vicuna", format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]), @@ -773,6 +809,7 @@ _register_template( ), ) + _register_template( name="xuanyuan", format_user=StringFormatter(slots=["Human: {{content}} Assistant:"]), @@ -783,11 +820,13 @@ _register_template( ), ) + _register_template( name="xverse", format_user=StringFormatter(slots=["Human: {{content}}\n\nAssistant: "]), ) + _register_template( name="yayi", format_user=StringFormatter(slots=[{"token": "<|Human|>"}, ":\n{{content}}\n\n", {"token": "<|YaYi|>"}, ":"]), @@ -807,6 +846,7 @@ _register_template( stop_words=["<|End|>"], ) + _register_template( name="yi", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), @@ -815,6 +855,7 @@ _register_template( replace_eos=True, ) + _register_template( name="yuan", format_user=StringFormatter(slots=["{{content}}", {"token": ""}]), @@ -823,6 +864,7 @@ _register_template( replace_eos=True, ) + _register_template( name="zephyr", format_user=StringFormatter(slots=["<|user|>\n{{content}}", {"eos_token"}, "<|assistant|>"]), @@ -831,12 +873,14 @@ _register_template( default_system="You are a friendly chatbot who always responds in the style of a pirate", ) + _register_template( name="ziya", format_user=StringFormatter(slots=[":{{content}}\n:"]), format_separator=EmptyFormatter(slots=["\n"]), ) + _register_template( name="llava", format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT: "]), diff --git a/src/llmtuner/train/sft/workflow.py b/src/llmtuner/train/sft/workflow.py index 205142e5..c5acb4bc 100644 --- a/src/llmtuner/train/sft/workflow.py +++ b/src/llmtuner/train/sft/workflow.py @@ -43,7 +43,7 @@ def run_sft( data_collator = DataCollatorForSeq2Seq( tokenizer=tokenizer, pad_to_multiple_of=8 if tokenizer.padding_side == "right" else None, # for shift short attention - label_pad_token_id=(IGNORE_INDEX if 
data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id), + label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id, ) # Override the decoding parameters of Seq2SeqTrainer diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index e1999946..a8a2b8e9 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -19,6 +19,7 @@ from .sft import run_sft if TYPE_CHECKING: from transformers import TrainerCallback + logger = get_logger(__name__) From 31420f7b314d1bc0d7918196d4b23524323a01ae Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 22:35:17 +0800 Subject: [PATCH 18/34] merge some func --- scripts/test_mllm.py | 99 --------------------------------- src/llmtuner/data/aligner.py | 51 +---------------- src/llmtuner/data/preprocess.py | 65 +++------------------- 3 files changed, 10 insertions(+), 205 deletions(-) delete mode 100644 scripts/test_mllm.py diff --git a/scripts/test_mllm.py b/scripts/test_mllm.py deleted file mode 100644 index b8fe3e0f..00000000 --- a/scripts/test_mllm.py +++ /dev/null @@ -1,99 +0,0 @@ -import os.path - -import fire -import torch -from datasets import load_dataset -from peft import PeftModel -from transformers import AutoTokenizer, AutoModelForVision2Seq, AutoProcessor -import shutil -from PIL import Image - -"""usage -python3 scripts/test_mllm.py \ ---base_model_path llava-hf/llava-1.5-7b-hf \ ---lora_model_path saves/llava-1.5-7b/lora/sft \ ---model_path saves/llava-1.5-7b/lora/merged \ ---dataset_name data/llava_instruct_example.json \ ---do_merge 1 -""" - - -def get_processor(model_path): - processor = AutoProcessor.from_pretrained(model_path) - CHAT_TEMPLATE = """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. 
{% for message in messages %}{% if message['role'] == 'user' %}USER: {{ message['content'] }} ASSISTANT: {% else %}{{ message['content'] }}{% endif %} {% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}""" - tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True) - tokenizer.chat_template = CHAT_TEMPLATE - processor.tokenizer = tokenizer - return processor - - -def apply_lora(base_model_path, model_path, lora_path): - print(f"Loading the base model from {base_model_path}") - base_model = AutoModelForVision2Seq.from_pretrained( - base_model_path, - torch_dtype=torch.float16, - low_cpu_mem_usage=True, - device_map="cuda", - ) - processor = get_processor(base_model_path) - tokenizer = processor.tokenizer - print(f"Loading the LoRA adapter from {lora_path}") - - lora_model = PeftModel.from_pretrained( - base_model, - lora_path, - torch_dtype=torch.float16, - ) - - print("Applying the LoRA") - model = lora_model.merge_and_unload() - - print(f"Saving the target model to {model_path}") - model.save_pretrained(model_path) - tokenizer.save_pretrained(model_path) - processor.image_processor.save_pretrained(model_path) - - -def main( - model_path: str, - dataset_name: str, - base_model_path: str = "", - lora_model_path: str = "", - do_merge: bool = False, -): - if not os.path.exists(model_path) or do_merge: - apply_lora(base_model_path, model_path, lora_model_path) - model = AutoModelForVision2Seq.from_pretrained( - model_path, - torch_dtype=torch.bfloat16, - low_cpu_mem_usage=True, - device_map="cuda", - ) - processor = get_processor(model_path) - raw_datasets = load_dataset("json", data_files=dataset_name) - train_dataset = raw_datasets["train"] - examples = train_dataset.select(range(3)) - texts = [] - images = [] - for example in examples: - messages = example["messages"][:1] - text = processor.tokenizer.apply_chat_template( - messages, tokenize=False, add_generation_prompt=False - ) - texts.append(text) - images.append(Image.open(example["images"][0])) - batch = processor(text=texts, images=images, return_tensors="pt", padding=True).to( - "cuda" - ) - output = model.generate(**batch, max_new_tokens=100) - res_list = processor.batch_decode(output, skip_special_tokens=True) - for i, prompt in enumerate(texts): - res = res_list[i] - print(f"#{i}") - print(f"prompt:{prompt}") - print(f"response:{res[len(prompt):].strip()}") - print() - - -if __name__ == "__main__": - fire.Fire(main) diff --git a/src/llmtuner/data/aligner.py b/src/llmtuner/data/aligner.py index 17b9fc6d..6fd6f404 100644 --- a/src/llmtuner/data/aligner.py +++ b/src/llmtuner/data/aligner.py @@ -36,12 +36,7 @@ def convert_alpaca(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") {"role": Role.ASSISTANT.value, "content": content} for content in examples[dataset_attr.response][i] ] elif dataset_attr.response and isinstance(examples[dataset_attr.response][i], str): - response = [ - { - "role": Role.ASSISTANT.value, - "content": examples[dataset_attr.response][i], - } - ] + response = [{"role": Role.ASSISTANT.value, "content": examples[dataset_attr.response][i]}] else: response = [] @@ -54,47 +49,6 @@ def convert_alpaca(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: - outputs = {"prompt": [], "response": [], "system": [], "tools": []} - tag_mapping = { - dataset_attr.user_tag: Role.USER.value, - dataset_attr.assistant_tag: Role.ASSISTANT.value, - 
dataset_attr.observation_tag: Role.OBSERVATION.value, - dataset_attr.function_tag: Role.FUNCTION.value, - dataset_attr.system_tag: Role.SYSTEM.value, - } - odd_tags = (dataset_attr.user_tag, dataset_attr.observation_tag) - even_tags = (dataset_attr.assistant_tag, dataset_attr.function_tag) - accept_tags = (odd_tags, even_tags) - for i, messages in enumerate(examples[dataset_attr.messages]): - if dataset_attr.system_tag and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag: - system = messages[0][dataset_attr.content_tag] - messages = messages[1:] - else: - system = examples[dataset_attr.system][i] if dataset_attr.system else "" - - messages = messages[: len(messages) // 2 * 2] # should be multiples of 2 - if len(messages) == 0: - continue - - aligned_messages = [] - for turn_idx, message in enumerate(messages): - if message[dataset_attr.role_tag] not in accept_tags[turn_idx % 2]: - raise ValueError("Invalid role tag in {}.".format(messages)) - - aligned_messages.append( - {"role": tag_mapping[message[dataset_attr.role_tag]], "content": message[dataset_attr.content_tag]} - ) - - outputs["prompt"].append(aligned_messages[:-1]) - outputs["response"].append(aligned_messages[-1:]) - outputs["system"].append(system) - outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "") - outputs["images"].append([]) - - return outputs - - -def convert_llava(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: outputs = {"prompt": [], "response": [], "system": [], "tools": [], "images": []} tag_mapping = { dataset_attr.user_tag: Role.USER.value, @@ -130,7 +84,6 @@ def convert_llava(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") - outputs["response"].append(aligned_messages[-1:]) outputs["system"].append(system) outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "") - print(examples[dataset_attr.images][i]) outputs["images"].append(examples[dataset_attr.images][i] if dataset_attr.images else []) return outputs @@ -148,8 +101,6 @@ def align_dataset( """ if dataset_attr.formatting == "alpaca": convert_func = partial(convert_alpaca, dataset_attr=dataset_attr) - elif dataset_attr.formatting == "llava": - convert_func = partial(convert_llava, dataset_attr=dataset_attr) else: convert_func = partial(convert_sharegpt, dataset_attr=dataset_attr) diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index 9cdcdfa2..6108b245 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -50,6 +50,7 @@ def preprocess_supervised_dataset( tokenizer: "PreTrainedTokenizer", template: "Template", data_args: "DataArguments", + processor: "AutoProcessor" = None, ) -> Dict[str, List[List[int]]]: # build inputs with format ` X Y ` and labels with format ` ... Y ` # for multiturn examples, we only mask the prompt part in each prompt-response pair. 
@@ -88,7 +89,9 @@ def preprocess_supervised_dataset( model_inputs["input_ids"].append(input_ids) model_inputs["attention_mask"].append([1] * len(input_ids)) model_inputs["labels"].append(labels) - + if processor is not None and "images" in examples: + pixel_values = processor.image_processor(examples["images"][0], return_tensors="pt")["pixel_values"][0] + model_inputs["pixel_values"].append(pixel_values) return model_inputs @@ -138,55 +141,6 @@ def preprocess_packed_supervised_dataset( return model_inputs -def preprocess_multimodal_supervised_dataset( - examples: Dict[str, List[Any]], - processor: "AutoProcessor", - template: "Template", - data_args: "DataArguments", -) -> Dict[str, List[List[int]]]: - # build inputs with format ` X Y ` and labels with format ` ... Y ` - # for multiturn examples, we only mask the prompt part in each prompt-response pair. - tokenizer = processor.tokenizer - model_inputs = {"input_ids": [], "attention_mask": [], "labels": [], "pixel_values": []} - - for i in range(len(examples["prompt"])): - if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1: - continue - - messages = examples["prompt"][i] + examples["response"][i] - input_ids, labels = [], [] - for turn_idx, (source_ids, target_ids) in enumerate( - template.encode_multiturn( - tokenizer, - messages, - examples["system"][i], - examples["tools"][i], - data_args.cutoff_len, - data_args.reserved_label_len, - ) - ): - if data_args.train_on_prompt: - source_mask = source_ids - elif turn_idx != 0 and template.efficient_eos: - source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1) - else: - source_mask = [IGNORE_INDEX] * len(source_ids) - - input_ids += source_ids + target_ids - labels += source_mask + target_ids - - if template.efficient_eos: - input_ids += [tokenizer.eos_token_id] - labels += [tokenizer.eos_token_id] - - model_inputs["input_ids"].append(input_ids) - model_inputs["attention_mask"].append([1] * len(input_ids)) - model_inputs["labels"].append(labels) - pixel_values = processor.image_processor(examples["images"][0], return_tensors="pt")["pixel_values"][0] - model_inputs["pixel_values"].append(pixel_values) - return model_inputs - - def preprocess_unsupervised_dataset( examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", @@ -307,15 +261,14 @@ def get_preprocess_and_print_func( preprocess_func = partial( preprocess_packed_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args ) - elif processor is not None: - preprocess_func = partial( - preprocess_multimodal_supervised_dataset, processor=processor, template=template, data_args=data_args - ) else: preprocess_func = partial( - preprocess_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args + preprocess_supervised_dataset, + tokenizer=tokenizer, + template=template, + data_args=data_args, + processor=processor, ) - print_function = partial(print_supervised_dataset_example, tokenizer=tokenizer) elif stage == "rm": preprocess_func = partial( From d29f3798f654c40448bc91e3b97db1c6e4de0fbe Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 22:40:25 +0800 Subject: [PATCH 19/34] modify some style --- src/llmtuner/data/preprocess.py | 76 ++++++++++++++++----------------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index 6108b245..3487b761 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -6,7 
+6,6 @@ from ..extras.constants import IGNORE_INDEX from ..extras.logging import get_logger from .utils import Role - if TYPE_CHECKING: from transformers import Seq2SeqTrainingArguments from transformers.tokenization_utils import AutoProcessor, PreTrainedTokenizer @@ -18,7 +17,7 @@ logger = get_logger(__name__) def preprocess_pretrain_dataset( - examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", data_args: "DataArguments" + examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", data_args: "DataArguments" ) -> Dict[str, List[List[int]]]: # build grouped texts with format `X1 X2 X3 ...` if packing is enabled text_examples = [messages[0]["content"] + tokenizer.eos_token for messages in examples["prompt"]] @@ -35,7 +34,7 @@ def preprocess_pretrain_dataset( block_size = data_args.cutoff_len total_length = (total_length // block_size) * block_size result = { - k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + k: [t[i: i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } if data_args.template == "gemma": @@ -46,11 +45,11 @@ def preprocess_pretrain_dataset( def preprocess_supervised_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", - processor: "AutoProcessor" = None, + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", + processor: "AutoProcessor" = None, ) -> Dict[str, List[List[int]]]: # build inputs with format ` X Y ` and labels with format ` ... Y ` # for multiturn examples, we only mask the prompt part in each prompt-response pair. @@ -63,14 +62,14 @@ def preprocess_supervised_dataset( messages = examples["prompt"][i] + examples["response"][i] input_ids, labels = [], [] for turn_idx, (source_ids, target_ids) in enumerate( - template.encode_multiturn( - tokenizer, - messages, - examples["system"][i], - examples["tools"][i], - data_args.cutoff_len, - data_args.reserved_label_len, - ) + template.encode_multiturn( + tokenizer, + messages, + examples["system"][i], + examples["tools"][i], + data_args.cutoff_len, + data_args.reserved_label_len, + ) ): if data_args.train_on_prompt: source_mask = source_ids @@ -96,10 +95,10 @@ def preprocess_supervised_dataset( def preprocess_packed_supervised_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build inputs with format ` X1 Y1 X2 Y2 ` # and labels with format ` ... Y1 ... 
Y2 ` @@ -111,7 +110,7 @@ def preprocess_packed_supervised_dataset( messages = examples["prompt"][i] + examples["response"][i] for source_ids, target_ids in template.encode_multiturn( - tokenizer, messages, examples["system"][i], examples["tools"][i] + tokenizer, messages, examples["system"][i], examples["tools"][i] ): if data_args.train_on_prompt: source_mask = source_ids @@ -133,19 +132,19 @@ def preprocess_packed_supervised_dataset( total_length = (total_length // block_size) * block_size # split by chunks of cutoff_len for i in range(0, total_length, block_size): - if not all(label == IGNORE_INDEX for label in labels[i : i + block_size]): - model_inputs["input_ids"].append(input_ids[i : i + block_size]) + if not all(label == IGNORE_INDEX for label in labels[i: i + block_size]): + model_inputs["input_ids"].append(input_ids[i: i + block_size]) model_inputs["attention_mask"].append([1] * block_size) - model_inputs["labels"].append(labels[i : i + block_size]) + model_inputs["labels"].append(labels[i: i + block_size]) return model_inputs def preprocess_unsupervised_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build inputs with format ` X` and labels with format `Y ` model_inputs = {"input_ids": [], "attention_mask": [], "labels": []} @@ -179,10 +178,10 @@ def preprocess_unsupervised_dataset( def preprocess_pairwise_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build input pairs with format ` X`, `Y1 ` and `Y2 ` model_inputs = {"prompt_ids": [], "chosen_ids": [], "rejected_ids": []} @@ -246,12 +245,12 @@ def print_unsupervised_dataset_example(example: Dict[str, List[int]], tokenizer: def get_preprocess_and_print_func( - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", - training_args: "Seq2SeqTrainingArguments", - stage: Literal["pt", "sft", "rm", "ppo"], - processor: Optional["AutoProcessor"] = None, + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + stage: Literal["pt", "sft", "rm", "ppo"], + processor: Optional["AutoProcessor"] = None, ) -> Tuple[Callable, Callable]: if stage == "pt": preprocess_func = partial(preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args) @@ -280,5 +279,4 @@ def get_preprocess_and_print_func( preprocess_unsupervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args ) print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) - return preprocess_func, print_function From ece78a6d6af0673795824b2f95c266c042532eb3 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 22:40:53 +0800 Subject: [PATCH 20/34] modify some style --- src/llmtuner/data/preprocess.py | 75 +++++++++++++++++---------------- 1 file changed, 38 insertions(+), 37 deletions(-) diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index 3487b761..59b49b9d 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -6,6 +6,7 @@ from ..extras.constants import 
IGNORE_INDEX from ..extras.logging import get_logger from .utils import Role + if TYPE_CHECKING: from transformers import Seq2SeqTrainingArguments from transformers.tokenization_utils import AutoProcessor, PreTrainedTokenizer @@ -17,7 +18,7 @@ logger = get_logger(__name__) def preprocess_pretrain_dataset( - examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", data_args: "DataArguments" + examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", data_args: "DataArguments" ) -> Dict[str, List[List[int]]]: # build grouped texts with format `X1 X2 X3 ...` if packing is enabled text_examples = [messages[0]["content"] + tokenizer.eos_token for messages in examples["prompt"]] @@ -34,7 +35,7 @@ def preprocess_pretrain_dataset( block_size = data_args.cutoff_len total_length = (total_length // block_size) * block_size result = { - k: [t[i: i + block_size] for i in range(0, total_length, block_size)] + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } if data_args.template == "gemma": @@ -45,11 +46,11 @@ def preprocess_pretrain_dataset( def preprocess_supervised_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", - processor: "AutoProcessor" = None, + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", + processor: "AutoProcessor" = None, ) -> Dict[str, List[List[int]]]: # build inputs with format ` X Y ` and labels with format ` ... Y ` # for multiturn examples, we only mask the prompt part in each prompt-response pair. @@ -62,14 +63,14 @@ def preprocess_supervised_dataset( messages = examples["prompt"][i] + examples["response"][i] input_ids, labels = [], [] for turn_idx, (source_ids, target_ids) in enumerate( - template.encode_multiturn( - tokenizer, - messages, - examples["system"][i], - examples["tools"][i], - data_args.cutoff_len, - data_args.reserved_label_len, - ) + template.encode_multiturn( + tokenizer, + messages, + examples["system"][i], + examples["tools"][i], + data_args.cutoff_len, + data_args.reserved_label_len, + ) ): if data_args.train_on_prompt: source_mask = source_ids @@ -95,10 +96,10 @@ def preprocess_supervised_dataset( def preprocess_packed_supervised_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build inputs with format ` X1 Y1 X2 Y2 ` # and labels with format ` ... Y1 ... 
Y2 ` @@ -110,7 +111,7 @@ def preprocess_packed_supervised_dataset( messages = examples["prompt"][i] + examples["response"][i] for source_ids, target_ids in template.encode_multiturn( - tokenizer, messages, examples["system"][i], examples["tools"][i] + tokenizer, messages, examples["system"][i], examples["tools"][i] ): if data_args.train_on_prompt: source_mask = source_ids @@ -132,19 +133,19 @@ def preprocess_packed_supervised_dataset( total_length = (total_length // block_size) * block_size # split by chunks of cutoff_len for i in range(0, total_length, block_size): - if not all(label == IGNORE_INDEX for label in labels[i: i + block_size]): - model_inputs["input_ids"].append(input_ids[i: i + block_size]) + if not all(label == IGNORE_INDEX for label in labels[i : i + block_size]): + model_inputs["input_ids"].append(input_ids[i : i + block_size]) model_inputs["attention_mask"].append([1] * block_size) - model_inputs["labels"].append(labels[i: i + block_size]) + model_inputs["labels"].append(labels[i : i + block_size]) return model_inputs def preprocess_unsupervised_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build inputs with format ` X` and labels with format `Y ` model_inputs = {"input_ids": [], "attention_mask": [], "labels": []} @@ -178,10 +179,10 @@ def preprocess_unsupervised_dataset( def preprocess_pairwise_dataset( - examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", + examples: Dict[str, List[Any]], + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build input pairs with format ` X`, `Y1 ` and `Y2 ` model_inputs = {"prompt_ids": [], "chosen_ids": [], "rejected_ids": []} @@ -245,12 +246,12 @@ def print_unsupervised_dataset_example(example: Dict[str, List[int]], tokenizer: def get_preprocess_and_print_func( - tokenizer: "PreTrainedTokenizer", - template: "Template", - data_args: "DataArguments", - training_args: "Seq2SeqTrainingArguments", - stage: Literal["pt", "sft", "rm", "ppo"], - processor: Optional["AutoProcessor"] = None, + tokenizer: "PreTrainedTokenizer", + template: "Template", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + stage: Literal["pt", "sft", "rm", "ppo"], + processor: Optional["AutoProcessor"] = None, ) -> Tuple[Callable, Callable]: if stage == "pt": preprocess_func = partial(preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args) From a7ead1440fefaced1a68be46b8bf7b19a5ea47c2 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 25 Apr 2024 22:59:46 +0800 Subject: [PATCH 21/34] modify some bug --- src/llmtuner/data/preprocess.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index 59b49b9d..be566a5b 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -91,6 +91,8 @@ def preprocess_supervised_dataset( model_inputs["labels"].append(labels) if processor is not None and "images" in examples: pixel_values = processor.image_processor(examples["images"][0], return_tensors="pt")["pixel_values"][0] + if "pixel_values" not in model_inputs: + model_inputs["pixel_values"] = [] 
model_inputs["pixel_values"].append(pixel_values) return model_inputs From 646a7885e7c721342b7f546ed717f1d4a68bc012 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 02:20:47 +0800 Subject: [PATCH 22/34] delete llava template (use vicuna) --- src/llmtuner/data/template.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/llmtuner/data/template.py b/src/llmtuner/data/template.py index 9a3673c3..73b22eb7 100644 --- a/src/llmtuner/data/template.py +++ b/src/llmtuner/data/template.py @@ -879,14 +879,3 @@ _register_template( format_user=StringFormatter(slots=[":{{content}}\n:"]), format_separator=EmptyFormatter(slots=["\n"]), ) - - -_register_template( - name="llava", - format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT: "]), - format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}]), - default_system=( - "A chat between a curious user and an artificial intelligence assistant. " - "The assistant gives helpful, detailed, and polite answers to the user's questions." - ), -) From 860549b99b520ae9509559b18646f83fd95933b8 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 02:49:39 +0800 Subject: [PATCH 23/34] update hparam name --- src/llmtuner/hparams/model_args.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py index 97b908e4..be65cd27 100644 --- a/src/llmtuner/hparams/model_args.py +++ b/src/llmtuner/hparams/model_args.py @@ -81,6 +81,10 @@ class ModelArguments: default=False, metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."}, ) + visual_inputs: bool = field( + default=False, + metadata={"help": "Whethor or not to use multimodal LLM that accepts visual inputs."}, + ) moe_aux_loss_coef: Optional[float] = field( default=None, metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."}, @@ -169,10 +173,6 @@ class ModelArguments: default=False, metadata={"help": "For debugging purposes, print the status of the parameters in the model."}, ) - use_mllm: bool = field( - default=False, - metadata={"help": "Whether use Multimodal LLM."}, - ) def __post_init__(self): self.compute_dtype = None From 7dcae3dba3dbda4953a0e3993e279cc8c21fc976 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 02:57:54 +0800 Subject: [PATCH 24/34] Update and rename llava_instruct_example.json to mllm_demo.json --- data/{llava_instruct_example.json => mllm_demo.json} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename data/{llava_instruct_example.json => mllm_demo.json} (100%) diff --git a/data/llava_instruct_example.json b/data/mllm_demo.json similarity index 100% rename from data/llava_instruct_example.json rename to data/mllm_demo.json From 5ef293387f8bded42364984f804fb8f665ef1f89 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 02:58:45 +0800 Subject: [PATCH 25/34] Update mllm_demo.json --- data/mllm_demo.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/data/mllm_demo.json b/data/mllm_demo.json index b5c97387..155bbd26 100644 --- a/data/mllm_demo.json +++ b/data/mllm_demo.json @@ -19,7 +19,7 @@ } ], "images": [ - "data/images/1.jpg" + "images/1.jpg" ] }, { @@ -42,7 +42,7 @@ } ], "images": [ - "data/images/2.jpg" + "images/2.jpg" ] }, { @@ -65,7 +65,7 @@ } ], "images": [ - "data/images/3.jpg" + "images/3.jpg" ] } ] From f8c26e6a346ca0f18f3b05b6fc7413f3625fb220 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 
26 Apr 2024 03:03:36 +0800 Subject: [PATCH 26/34] Update dataset_info.json --- data/dataset_info.json | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/data/dataset_info.json b/data/dataset_info.json index 4958d8c7..c7a1556e 100644 --- a/data/dataset_info.json +++ b/data/dataset_info.json @@ -58,6 +58,21 @@ "tools": "tools" } }, + "mllm_demo": { + "file_name": "mllm_demo.json", + "file_sha1": "b6709b23657d5c42a701f1c5574f3a6edaa40a20", + "formatting": "sharegpt", + "columns": { + "messages": "messages", + "images": "images" + }, + "tags": { + "role_tag": "role", + "content_tag": "content", + "user_tag": "user", + "assistant_tag": "assistant" + } + }, "example": { "script_url": "example_dataset", "columns": { @@ -185,6 +200,7 @@ "ultrachat_200k": { "hf_hub_url": "HuggingFaceH4/ultrachat_200k", "ms_hub_url": "AI-ModelScope/ultrachat_200k", + "formatting": "sharegpt", "columns": { "messages": "messages" }, @@ -193,8 +209,7 @@ "content_tag": "content", "user_tag": "user", "assistant_tag": "assistant" - }, - "formatting": "sharegpt" + } }, "agent_instruct": { "hf_hub_url": "THUDM/AgentInstruct", @@ -204,6 +219,7 @@ "lmsys_chat": { "hf_hub_url": "lmsys/lmsys-chat-1m", "ms_hub_url": "AI-ModelScope/lmsys-chat-1m", + "formatting": "sharegpt", "columns": { "messages": "conversation" }, @@ -212,8 +228,7 @@ "content_tag": "content", "user_tag": "human", "assistant_tag": "assistant" - }, - "formatting": "sharegpt" + } }, "evol_instruct": { "hf_hub_url": "WizardLM/WizardLM_evol_instruct_V2_196k", @@ -413,19 +428,5 @@ "prompt": "content" }, "folder": "python" - }, - "mllm_instruct_example": { - "file_name": "llava_instruct_example.json", - "formatting": "llava", - "columns": { - "messages": "messages", - "images": "images" - }, - "tags": { - "role_tag": "role", - "content_tag": "content", - "user_tag": "user", - "assistant_tag": "assistant" - } } } From 7d812ed8419817a8724f8584996607e8685b5ea9 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 03:22:40 +0800 Subject: [PATCH 27/34] Update loader.py --- src/llmtuner/model/loader.py | 54 ++++++++++++++---------------------- 1 file changed, 21 insertions(+), 33 deletions(-) diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index 5b5c0a4d..0ff7a350 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -1,12 +1,6 @@ -from typing import TYPE_CHECKING, Any, Dict, Union +from typing import TYPE_CHECKING, Any, Dict, Optional, TypedDict -from transformers import ( - AutoConfig, - AutoModelForCausalLM, - AutoModelForVision2Seq, - AutoProcessor, - AutoTokenizer, -) +from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForVision2Seq, AutoProcessor, AutoTokenizer from trl import AutoModelForCausalLMWithValueHead from ..extras.logging import get_logger @@ -19,13 +13,19 @@ from .utils.unsloth import load_unsloth_pretrained_model if TYPE_CHECKING: - from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer + from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer, ProcessorMixin from ..hparams import FinetuningArguments, ModelArguments + logger = get_logger(__name__) +class TokenizerModule(TypedDict): + tokenizer: "PreTrainedTokenizer" + processor: Optional["ProcessorMixin"] + + def _get_init_kwargs(model_args: "ModelArguments") -> Dict[str, Any]: r""" Gets arguments to load config/tokenizer/model. 
@@ -41,7 +41,7 @@ def _get_init_kwargs(model_args: "ModelArguments") -> Dict[str, Any]: } -def load_tokenizer(model_args: "ModelArguments") -> Dict[str, Union["PreTrainedTokenizer", "AutoProcessor"]]: +def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule": r""" Loads pretrained tokenizer. @@ -75,25 +75,14 @@ def load_tokenizer(model_args: "ModelArguments") -> Dict[str, Union["PreTrainedT logger.warning("New tokens have been added, changed `resize_vocab` to True.") patch_tokenizer(tokenizer) - tokenizer_modules = {"tokenizer": tokenizer, "processor": None} - if model_args.use_mllm: - try: - processor = AutoProcessor.from_pretrained( - model_args.model_name_or_path, - use_fast=model_args.use_fast_tokenizer, - split_special_tokens=model_args.split_special_tokens, - padding_side="right", - **init_kwargs, - ) - except Exception: # try the fast one - processor = AutoProcessor.from_pretrained( - model_args.model_name_or_path, - use_fast=True, - padding_side="right", - **init_kwargs, - ) - tokenizer_modules["processor"] = processor - return tokenizer_modules + + if model_args.visual_inputs: + processor = AutoProcessor.from_pretrained(model_args.model_name_or_path, **init_kwargs) + setattr(processor, "tokenizer", tokenizer) + else: + processor = None + + return {"tokenizer": tokenizer, "processor": processor} def load_config(model_args: "ModelArguments") -> "PretrainedConfig": @@ -132,11 +121,10 @@ def load_model( if model_args.mixture_of_depths == "load": model = load_mod_pretrained_model(**init_kwargs) + elif model_args.visual_inputs: + model = AutoModelForVision2Seq.from_pretrained(**init_kwargs) else: - if model_args.use_mllm: - model = AutoModelForVision2Seq.from_pretrained(**init_kwargs) - else: - model = AutoModelForCausalLM.from_pretrained(**init_kwargs) + model = AutoModelForCausalLM.from_pretrained(**init_kwargs) if model_args.mixture_of_depths == "convert": model = convert_pretrained_model_to_mod(model, config, model_args) From e16f128dc397d71e74398fa38818e57e80cc32f4 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 03:29:12 +0800 Subject: [PATCH 28/34] Update workflow.py --- src/llmtuner/train/sft/workflow.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/llmtuner/train/sft/workflow.py b/src/llmtuner/train/sft/workflow.py index c5acb4bc..3ead9edf 100644 --- a/src/llmtuner/train/sft/workflow.py +++ b/src/llmtuner/train/sft/workflow.py @@ -28,11 +28,10 @@ def run_sft( generating_args: "GeneratingArguments", callbacks: Optional[List["TrainerCallback"]] = None, ): - tokenizer_modules = load_tokenizer(model_args) - tokenizer = tokenizer_modules["tokenizer"] - processor = tokenizer_modules["processor"] - dataset = get_dataset(tokenizer, model_args, data_args, training_args, stage="sft", processor=processor) - model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) + tokenizer_module = load_tokenizer(model_args) + dataset = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module) + tokenizer = tokenizer_module["tokenizer"] + model = load_model(tokenizer, model_args, finetuning_args, is_trainable=training_args.do_train) if training_args.predict_with_generate: tokenizer.padding_side = "left" # use left-padding in generation @@ -49,8 +48,7 @@ def run_sft( # Override the decoding parameters of Seq2SeqTrainer training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len training_args.generation_num_beams = data_args.eval_num_beams or 
training_args.generation_num_beams - if model_args.use_mllm: - training_args.remove_unused_columns = False + training_args.remove_unused_columns = False if model_args.visual_inputs else training_args.remove_unused_columns # Initialize our Trainer trainer = CustomSeq2SeqTrainer( From 3408af236f0b4ef64c3bfa791ef757828a74da7f Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 03:33:07 +0800 Subject: [PATCH 29/34] Update loader.py --- src/llmtuner/data/loader.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/llmtuner/data/loader.py b/src/llmtuner/data/loader.py index fa4aa9c1..ca0d5407 100644 --- a/src/llmtuner/data/loader.py +++ b/src/llmtuner/data/loader.py @@ -16,12 +16,13 @@ from .utils import checksum, merge_dataset if TYPE_CHECKING: from datasets import Dataset, IterableDataset - from transformers import AutoProcessor, Seq2SeqTrainingArguments + from transformers import ProcessorMixin, Seq2SeqTrainingArguments from transformers.tokenization_utils import PreTrainedTokenizer from ..hparams import DataArguments, ModelArguments from .parser import DatasetAttr + logger = get_logger(__name__) @@ -114,12 +115,12 @@ def load_single_dataset( def get_dataset( - tokenizer: "PreTrainedTokenizer", model_args: "ModelArguments", data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments", stage: Literal["pt", "sft", "rm", "ppo"], - processor: Optional["AutoProcessor"] = None, + tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"] = None, ) -> Union["Dataset", "IterableDataset"]: template = get_template_and_fix_tokenizer(tokenizer, data_args.template) if data_args.train_on_prompt and template.efficient_eos: @@ -149,7 +150,7 @@ def get_dataset( with training_args.main_process_first(desc="pre-process dataset"): preprocess_func, print_function = get_preprocess_and_print_func( - tokenizer, template, data_args, training_args, stage, processor + data_args, training_args, stage, template, tokenizer, processor ) column_names = list(next(iter(dataset)).keys()) kwargs = {} From f62cadb258f85ddf6b595c8f6fe3385c2e0e3376 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 03:35:39 +0800 Subject: [PATCH 30/34] Update parser.py --- src/llmtuner/data/parser.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/llmtuner/data/parser.py b/src/llmtuner/data/parser.py index 4d3d7741..01a417a9 100644 --- a/src/llmtuner/data/parser.py +++ b/src/llmtuner/data/parser.py @@ -25,9 +25,10 @@ class DatasetAttr: subset: Optional[str] = None folder: Optional[str] = None ranking: bool = False - formatting: Literal["alpaca", "sharegpt", "llava"] = "alpaca" + formatting: Literal["alpaca", "sharegpt"] = "alpaca" """ columns """ system: Optional[str] = None + images: Optional[str] = None """ columns for the alpaca format """ prompt: Optional[str] = "instruction" query: Optional[str] = "input" @@ -44,8 +45,6 @@ class DatasetAttr: observation_tag: Optional[str] = "observation" function_tag: Optional[str] = "function_call" system_tag: Optional[str] = "system" - """ columns for the mllm format """ - images: Optional[str] = None def __repr__(self) -> str: return self.dataset_name @@ -105,21 +104,18 @@ def get_dataset_list(data_args: "DataArguments") -> List["DatasetAttr"]: dataset_attr.set_attr("folder", dataset_info[name]) dataset_attr.set_attr("ranking", dataset_info[name], default=False) dataset_attr.set_attr("formatting", dataset_info[name], default="alpaca") - dataset_attr.set_attr("images", dataset_info[name], 
default="") if "columns" in dataset_info[name]: - column_names = ["system"] + column_names = ["system", "images"] if dataset_attr.formatting == "alpaca": column_names.extend(["prompt", "query", "response", "history"]) - elif dataset_attr.formatting == "llava": - column_names.extend(["messages", "images"]) else: column_names.extend(["messages", "tools"]) for column_name in column_names: dataset_attr.set_attr(column_name, dataset_info[name]["columns"]) - if dataset_attr.formatting != "alpaca" and "tags" in dataset_info[name]: + if dataset_attr.formatting == "sharegpt" and "tags" in dataset_info[name]: tag_names = ( "role_tag", "content_tag", From fcd09112d5929f5a4c502c5886050083e69122f9 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 03:48:34 +0800 Subject: [PATCH 31/34] Update aligner.py --- src/llmtuner/data/aligner.py | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/src/llmtuner/data/aligner.py b/src/llmtuner/data/aligner.py index 6fd6f404..dc1de865 100644 --- a/src/llmtuner/data/aligner.py +++ b/src/llmtuner/data/aligner.py @@ -1,3 +1,4 @@ +import os from functools import partial from typing import TYPE_CHECKING, Any, Dict, List, Union @@ -13,8 +14,10 @@ if TYPE_CHECKING: from .parser import DatasetAttr -def convert_alpaca(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: - outputs = {"prompt": [], "response": [], "system": [], "tools": []} +def convert_alpaca( + examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr", data_args: "DataArguments" +) -> Dict[str, List[Any]]: + outputs = {"prompt": [], "response": [], "system": [], "tools": [], "images": []} for i in range(len(examples[dataset_attr.prompt])): prompt = [] if dataset_attr.history and isinstance(examples[dataset_attr.history][i], list): @@ -44,11 +47,18 @@ def convert_alpaca(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") outputs["response"].append(response) outputs["system"].append(examples[dataset_attr.system][i] if dataset_attr.system else "") outputs["tools"].append("") - outputs["images"].append([]) + outputs["images"].append( + [os.path.join(data_args.dataset_dir, path) for path in examples[dataset_attr.images][i]] + if dataset_attr.images + else [] + ) + return outputs -def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]: +def convert_sharegpt( + examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr", data_args: "DataArguments" +) -> Dict[str, List[Any]]: outputs = {"prompt": [], "response": [], "system": [], "tools": [], "images": []} tag_mapping = { dataset_attr.user_tag: Role.USER.value, @@ -84,7 +94,11 @@ def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr" outputs["response"].append(aligned_messages[-1:]) outputs["system"].append(system) outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "") - outputs["images"].append(examples[dataset_attr.images][i] if dataset_attr.images else []) + outputs["images"].append( + [os.path.join(data_args.dataset_dir, path) for path in examples[dataset_attr.images][i]] + if dataset_attr.images + else [] + ) return outputs @@ -97,12 +111,13 @@ def align_dataset( prompt: [{"role": "user", "content": "..."}] * (2T - 1) response: [{"role": "assistant", "content": "..."}] * N (N > 1 for ranking dataset) system: "..." - tools: "..." 
+ tools: "...", + images: [], """ if dataset_attr.formatting == "alpaca": - convert_func = partial(convert_alpaca, dataset_attr=dataset_attr) + convert_func = partial(convert_alpaca, dataset_attr=dataset_attr, data_args=data_args) else: - convert_func = partial(convert_sharegpt, dataset_attr=dataset_attr) + convert_func = partial(convert_sharegpt, dataset_attr=dataset_attr, data_args=data_args) column_names = list(next(iter(dataset)).keys()) features = Features.from_dict( @@ -115,7 +130,7 @@ def align_dataset( ], "system": {"dtype": "string", "_type": "Value"}, "tools": {"dtype": "string", "_type": "Value"}, - "images": {"feature": {"_type": "Image"}, "_type": "Sequence"}, + "images": [{"_type": "Image"}], } ) kwargs = {} From 7f3bd35c0ead92710036064bf306740e8ee901c7 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Fri, 26 Apr 2024 04:10:28 +0800 Subject: [PATCH 32/34] Update preprocess.py --- src/llmtuner/data/preprocess.py | 66 +++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 19 deletions(-) diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index be566a5b..0b467724 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -8,15 +8,26 @@ from .utils import Role if TYPE_CHECKING: - from transformers import Seq2SeqTrainingArguments - from transformers.tokenization_utils import AutoProcessor, PreTrainedTokenizer + from PIL import Image + from transformers import ProcessorMixin, Seq2SeqTrainingArguments + from transformers.image_processing_utils import BaseImageProcessor + from transformers.tokenization_utils import PreTrainedTokenizer from ..hparams import DataArguments from .template import Template + logger = get_logger(__name__) +def _preprocess_visual_inputs(model_inputs: Dict[str, Any], processor: "ProcessorMixin", image: "Image") -> None: + image_processor: "BaseImageProcessor" = getattr(processor, "image_processor") + pixel_values = image_processor(image, return_tensors="pt")["pixel_values"][0] + if "pixel_values" not in model_inputs: + model_inputs["pixel_values"] = [] + model_inputs["pixel_values"].append(pixel_values) + + def preprocess_pretrain_dataset( examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", data_args: "DataArguments" ) -> Dict[str, List[List[int]]]: @@ -47,10 +58,10 @@ def preprocess_pretrain_dataset( def preprocess_supervised_dataset( examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", template: "Template", + tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], data_args: "DataArguments", - processor: "AutoProcessor" = None, ) -> Dict[str, List[List[int]]]: # build inputs with format ` X Y ` and labels with format ` ... Y ` # for multiturn examples, we only mask the prompt part in each prompt-response pair. 
@@ -90,17 +101,15 @@ def preprocess_supervised_dataset( model_inputs["attention_mask"].append([1] * len(input_ids)) model_inputs["labels"].append(labels) if processor is not None and "images" in examples: - pixel_values = processor.image_processor(examples["images"][0], return_tensors="pt")["pixel_values"][0] - if "pixel_values" not in model_inputs: - model_inputs["pixel_values"] = [] - model_inputs["pixel_values"].append(pixel_values) + _preprocess_visual_inputs(model_inputs, processor, examples["images"][i][0]) + return model_inputs def preprocess_packed_supervised_dataset( examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", template: "Template", + tokenizer: "PreTrainedTokenizer", data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build inputs with format ` X1 Y1 X2 Y2 ` @@ -145,8 +154,9 @@ def preprocess_packed_supervised_dataset( def preprocess_unsupervised_dataset( examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", template: "Template", + tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build inputs with format ` X` and labels with format `Y ` @@ -176,14 +186,17 @@ def preprocess_unsupervised_dataset( model_inputs["input_ids"].append(input_ids) model_inputs["attention_mask"].append([1] * len(input_ids)) model_inputs["labels"].append(labels) + if processor is not None and "images" in examples: + _preprocess_visual_inputs(model_inputs, processor, examples["images"][i][0]) return model_inputs def preprocess_pairwise_dataset( examples: Dict[str, List[Any]], - tokenizer: "PreTrainedTokenizer", template: "Template", + tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build input pairs with format ` X`, `Y1 ` and `Y2 ` @@ -218,6 +231,8 @@ def preprocess_pairwise_dataset( model_inputs["prompt_ids"].append(prompt_ids) model_inputs["chosen_ids"].append(chosen_ids) model_inputs["rejected_ids"].append(rejected_ids) + if processor is not None and "images" in examples: + _preprocess_visual_inputs(model_inputs, processor, examples["images"][i][0]) return model_inputs @@ -248,12 +263,12 @@ def print_unsupervised_dataset_example(example: Dict[str, List[int]], tokenizer: def get_preprocess_and_print_func( - tokenizer: "PreTrainedTokenizer", - template: "Template", data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments", stage: Literal["pt", "sft", "rm", "ppo"], - processor: Optional["AutoProcessor"] = None, + template: "Template", + tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], ) -> Tuple[Callable, Callable]: if stage == "pt": preprocess_func = partial(preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args) @@ -261,25 +276,38 @@ def get_preprocess_and_print_func( elif stage == "sft" and not training_args.predict_with_generate: if data_args.packing: preprocess_func = partial( - preprocess_packed_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args + preprocess_packed_supervised_dataset, + template=template, + tokenizer=tokenizer, + data_args=data_args, ) else: preprocess_func = partial( preprocess_supervised_dataset, - tokenizer=tokenizer, template=template, - data_args=data_args, + tokenizer=tokenizer, processor=processor, + data_args=data_args, ) + print_function = partial(print_supervised_dataset_example, tokenizer=tokenizer) elif stage == "rm": preprocess_func = partial( - 
preprocess_pairwise_dataset, tokenizer=tokenizer, template=template, data_args=data_args + preprocess_pairwise_dataset, + template=template, + tokenizer=tokenizer, + processor=processor, + data_args=data_args, ) print_function = partial(print_pairwise_dataset_example, tokenizer=tokenizer) else: preprocess_func = partial( - preprocess_unsupervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args + preprocess_unsupervised_dataset, + template=template, + tokenizer=tokenizer, + processor=processor, + data_args=data_args, ) print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) + return preprocess_func, print_function From e057c8de486bfbc829240924f9238d6212c917f1 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 26 Apr 2024 05:34:58 +0800 Subject: [PATCH 33/34] support mllm hf inference --- data/README.md | 3 +- data/README_zh.md | 3 +- data/dataset_info.json | 2 +- data/images/2.jpg | Bin 49681 -> 22681 bytes data/images/3.jpg | Bin 70036 -> 16105 bytes data/mllm_demo.json | 2 +- examples/README.md | 1 + examples/README_zh.md | 1 + .../sft_mllm.sh} | 25 +++++++------- src/llmtuner/chat/base_engine.py | 3 ++ src/llmtuner/chat/chat_model.py | 14 +++++--- src/llmtuner/chat/hf_engine.py | 32 +++++++++++++++--- src/llmtuner/chat/vllm_engine.py | 14 ++++++-- src/llmtuner/data/preprocess.py | 8 +++-- src/llmtuner/eval/evaluator.py | 2 +- src/llmtuner/hparams/parser.py | 3 ++ src/llmtuner/train/dpo/workflow.py | 5 +-- src/llmtuner/train/orpo/workflow.py | 5 +-- src/llmtuner/train/ppo/workflow.py | 5 +-- src/llmtuner/train/pt/workflow.py | 5 +-- src/llmtuner/train/rm/workflow.py | 5 +-- src/llmtuner/train/sft/workflow.py | 4 +-- src/llmtuner/train/tuner.py | 2 +- src/llmtuner/train/utils.py | 6 ++-- src/llmtuner/webui/chatter.py | 5 ++- src/llmtuner/webui/components/chatbot.py | 15 +++++--- src/llmtuner/webui/locales.py | 11 ++++++ 27 files changed, 130 insertions(+), 51 deletions(-) rename examples/{mllm/sft_llava.sh => lora_single_gpu/sft_mllm.sh} (56%) diff --git a/data/README.md b/data/README.md index 2ea0c117..6de0430f 100644 --- a/data/README.md +++ b/data/README.md @@ -18,7 +18,8 @@ If you are using a custom dataset, please provide your dataset definition in the "history": "the column name in the dataset containing the histories. (default: None)", "messages": "the column name in the dataset containing the messages. (default: conversations)", "system": "the column name in the dataset containing the system prompts. (default: None)", - "tools": "the column name in the dataset containing the tool description. (default: None)" + "tools": "the column name in the dataset containing the tool description. (default: None)", + "images": "the column name in the dataset containing the image inputs. (default: None)" }, "tags (optional, used for the sharegpt format)": { "role_tag": "the key in the message represents the identity. 
diff --git a/data/README_zh.md b/data/README_zh.md
index b00f81d9..fb6cb1d9 100644
--- a/data/README_zh.md
+++ b/data/README_zh.md
@@ -18,7 +18,8 @@
     "history": "数据集代表历史对话的表头名称(默认:None)",
     "messages": "数据集代表消息列表的表头名称(默认:conversations)",
     "system": "数据集代表系统提示的表头名称(默认:None)",
-    "tools": "数据集代表工具描述的表头名称(默认:None)"
+    "tools": "数据集代表工具描述的表头名称(默认:None)",
+    "images": "数据集代表图像输入的表头名称(默认:None)"
   },
   "tags(可选,用于 sharegpt 格式)": {
     "role_tag": "消息中代表发送者身份的键名(默认:from)",
diff --git a/data/dataset_info.json b/data/dataset_info.json
index c7a1556e..d053be1d 100644
--- a/data/dataset_info.json
+++ b/data/dataset_info.json
@@ -429,4 +429,4 @@
     },
     "folder": "python"
   }
-}
+}
\ No newline at end of file
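The `images` column documented in both READMEs above is what the preprocessing changes in the previous patch consume: after loading, each sample carries a list of images, and only the first element is handed to the multimodal processor (`examples["images"][i][0]`). A minimal illustration of that expected layout follows; the file paths and the loop are placeholders for illustration, not taken from the patch:

```python
# Illustrative only: the per-sample layout the preprocessing code assumes for
# the "images" column, i.e. one list of images per sample, of which the first
# element is passed to the processor.
examples = {"images": [["data/images/2.jpg"], ["data/images/3.jpg"]]}

for i in range(len(examples["images"])):
    first_image = examples["images"][i][0]  # mirrors examples["images"][i][0] in preprocess.py
    print(first_image)
```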
diff --git a/data/images/2.jpg b/data/images/2.jpg
index 74771224baf46a8ab64f5e068dccca662a49fddc..1df98231a1c1009f5baccbdcd5e5432c443c93b3 100644
GIT binary patch
[base85-encoded binary data omitted: data/images/2.jpg, 49681 -> 22681 bytes]

diff --git a/data/images/3.jpg b/data/images/3.jpg
index d334a861b22a5d72c73a2acebdfe0e4cbd041a75..72bc7315cc83be0fadea2d236d3d293dde7d5652 100644
GIT binary patch
[base85-encoded binary data omitted: data/images/3.jpg, 70036 -> 16105 bytes]
z-SBAnMc#57=|}xHxf{KZq!4_$>p!7ub{kc&0j93j0o@G(W9T#TdW!VlzEtu{S{jU+ zIHb@vD}O3ffCxsGldZNglSW8LfK!)VLOiyKj25D33MxdAzIO=d3dOFpyDKaS5cdKed&QO071pJZ=IfMm9-by;;G@^s_K^zpC8|dU%Ciw!=GZiANnpWL%oUK@oNjcp8wQ&v@=X-%|P?UTOp@<-|sU5$HVC42b1#hkwt zzxV+O{p7pmA6q}$$TfBpm#jI{tH;>4kev>Rwnfx-R@5RNBRpegjRLqzVi&tGC7Zg6 zpxuHFwn*N;5TRI_A(gdMC4cvy zi&p1bevI6k^98)IlRK?4azvI-f$6NE4zMFbdgr-ZrQ(ff5&3*hsH148drTLp;yUb`(`yeZJe~6Y(6|(!3pD+ z7s(EjE7PaJPj{vkgSM)s3$NpAf*FY5+lMMMHY9%Xl+HLgxjOeZK8 zG7=<+9=Za2fv`D!bSU2DD{-}p{%CkEkp6890R2ko0Kj~g4jF4>ZCkE{^6&4if)0aY z$4OM(r)aB6DCj4P`Z+j3MU^Z*$xEKI!Tut9y+4^!X0$=|2RbcQaxN|z-rH)lGqPY? zJ{e7ZQovFAZ}E!)$B|$UIdCfWWrKcm+j1&V8D@x8bj=@n(6pUgWqv#0{+*4y;EMa? zggM+xXqYjas}*SNEL`T&%I^9n1yt=F8PAU6t~-2sP?}W-TqR9w*$YaXXpqrVD+hL^ z*!xfFj*8RmslV?BF-4DlTmhUuy{20W0Yf2wNpt*5kfytvEbS9F^S3lgk|xy&Ec~4B z1FmyPr=OOZ|5%cL1bVQlEZl|4Yt&f;t6TxtGaNgw0JBwESou;l=K@zawZ}<@n84d4 zFq1#O;e@QHQOLKP`qV9#cgoXFeGgwMbPhcDdSPrO9s{93gQG;=iSGH(haOqZSc-ge zTsaIc^YZj9@}ljFG7}fuP=-0C9dE8OhyNJrVMV;6|r z+s(FnO1_o5u6gpVkaM#9n98@(nReA@w|Z;`t_?ont>V(^@nWm39d$|;f4_Sc4O`Gq zQGtp%gPg!~pSy^hI6J`t&4&|FcgR{znK^I{t>)7pXDgl6jgsIztdUiARaojOw$!%v zG1c<9?-pRYe@mFoSCzojzW7wdxTrn%6l6`$moM0OBoaIO8+2R8rtUcnZljHqKre^0 zWwfT?#9gSH{Q6hftHeMp>&Or|_NF;Lx5cQ|j7!55r7pIocP(UMbCk|?>k!Lbw_o87 zXC(4qHCB!4#tweNtjC!8=hs31O3C(Uge>YQSq4mef%Gp86a7oL9hQo;q42)tp67C} z7mogoHnPc@LTT+3bT0=i#n)s z%sWUN^UpYb`fKL-X2@%gvUGqIYXsLuNl_SMHiW-Nx8@|=Gi zK;GC)a~6Y1EQ_sKx`8z_b1;+rG0FS%z1_W#e7dg2LoI}|eOQ1=_aV}E*J|2LS#+8G z9)>@vCUn5fsq$T22qnFza`Rq@6piGm0lBnLX59*~1BQL#m~1{(Lyy&JcKy?}4&E8# z)rMA-V0SG~9@{s^l|S;V&&-^>L}GZJ)kf&V zgBi^hz*{${pMOUZa#w8Hg|asgqn@e99#lbev1121`||oYu7vlvEV^j+Hz$1@n>7KT zIjOo9CNGvAQ|#`F$DSHW|Epg)qSV^D`onE&kVFMf1AiR~?CP&`X7P_m7bIZ?(ycR! zOFj=XIZVjZKF}{9Chh_0mB~0W%1v!pm!pho|;3%gxoEQqgj zB3)&H^y3!B1UmAb&J_-gvaJ*dVN(h{rw;nX_R5^uAQ)`)a++&fI2*BKWk@@%f6Pa? 
zMCKl7CzUl*WD}R~aim%^@7Cqj^ZxQjGTT(PPoN)vNCL`7A%$- zO!E4$1M*&4B(FFJ{%w0Yf`xc*6~C5s7K1iHk8wD-`pOl6SzR9|eD}oT+>NB1(q94G zj>jwIO}{r|bbBeMPYS3%wSXQyP4}8=WL^3aYY<$=1PWeoQ|CSit+oOA#zmH<*|lw%<}P6V)&u?3-|3Sz;0pWgZ^Q~!@2KO zdbVn*ESv}a{I@AMakknQ5U`#c!?3ETyR6sMq&?V@hgqq!g`LFW8-|^ z*j2_NWQ&CQ`VQu#%klo}#*J*(l>v$gao(d6H(Mz!;^HSiUNq&0$3=O$f`1Uk{eQ;3 z&2B<#=wsQt7H`#!kFPgO^W6p-g}omGBG$q05o68gXI2>T&p}M6E5JU8!G&!j!^fdq z^NwkI)e4tqe4p0ezDDk z?4{(-4Ih9MMJ>7?)t(2pFg4d51X+u-3f04$_3eLy>^z7M5V%LCc}2O~slg=bV3_(k zYQe^81_?`cA74`5OxOjdlV-PpI93o8CEL{WXK!x<-TXlk)*rhi{dbo41VC>|QdGLo z%y=f1eO1P?h@jW0VZ8;mw?n7)O$MC}-rswxujEe_u1!uESC1dZP_Ev9e{1#-Yb=so zM*-*iAMJ;a`U1iT&Z58fWmsHpKY)&uI^P)C?j^}O8vUKJ}+1R`FC-~h}AXdJq zYs?vl)>Yx!(6O&0&SAJpAn~>oXsV>8A^wI1a=zdP=c%UTHtdL`QO)n?a}^ z$bAy`x4kn!Xlph_tY4cO=RLuNXJhwCU#=niQkB=0`KVfvhnM-c-e=)KXHMojfDB$k zZqO4a_-;cRADIla@=nO=bMDr$wMEJXn}8=h=q?Uxl={WR@;!{>H5@#;r)EllMbUX# zf**DvSAey^d3kto_Far+`e9;`H4gcDy@MFK)x@+G~DBzbDG1!%LD^%2tEL z#JlIm@_z8JxsDwkeuPjAwhA^o8*$f}GKI_64UOa}H-BVLKT9r0M` zTjk>7xALOig7oLRedjb2@IQdSlSkI{)S~VsApbG*K8y1hv#;)J@b|$jlI>+s@qYAA zXX|fD0SCnMf+Mqh1_#B^TCt+|{YqS7@-htv#3T7ZxG{>W=CXmLk;D^f-a!QYuXU{~ zMT$QlB?H-A?;9dXvpVe`nrp%DQ%{N=ztA7Mp|3 ztgN;#!{I_BZho%PYO>9r)*e0jZWQrHHq$R*Y|tr?Emhl7u-kULs|O#W)N2b&^+hp|Aki3ipJ8q2o%2~V@a zDdlF}%M#RLYvF9ZxS{`eI+3Ig;bvJN(b zk-kfXjn@H+$WdGs3egR`K^$zOEA<(2HK_ULjdPdpPjXFGE;h_xidXv_W|&E}MFot$ z0@Ru8C;lN$!WqJ|yf)3Y8a?gCWO{>_9Mq=F-oJNvPvO}aBS7SHcRULvWGM&Zo@4)= zwlTXVr%s-+ay!c*Xn%9Pc;Ed?lMyY&prO^@zftd|)X^=UB0q>|*a!cznvVZZZe8V1i=X)=8skEHw!d&$Ahgjie!6P0Pjb|(Z9>r-uPX&+h}d{53q=HACZ!_ zNGv{W!g1V4*uUA>eI}dGup&C0O!-zJ$~5( z2N{E1c!f-Rct5B~Y!3Kzk?klk98htbDVvO->Md@3oqE3>JOxJshb%Am@?)igZe;6~ zDVxl~n_CIf{W%^8p%E6R1v_Jo+p~A;`&AcqGfW?rL#5`_*d62WqNhxd@g9aO8t*p8 z?0;hp6lKjq(jtBPK46AvO6q&_0z%pUaKi^W>G6GH*C-Z{DMpCT+54Uv03j zz_QIId2l_s8#sKRaK>cL}$o5zkg;_!QZ_E zeT(dbbx&FZ`X<&%J7&7hTmc-~u04_YSQlL*$P=N1-gB?0vL*Nq4KAT?4hJ0U#)dbu zO(wFr7q{v(ZN<*XENBggq4$1ui!mbKr!%|+K3XoO9T3>3=|f`lh@K*CvQ@YoZB{c zOV`qEme3LEW8$%U#~YaAOoQ*4nf`&*=}K*jh45}a0*0Mrs98?npYSKx<~yH_c>4HB z2mOuf{Zu8%^NTC5^)q?InQ@VPnf2Vvaqi!>Jr7F5xY5(X%e-H;=zw_n0iewUL3n#9 zvJI^co%34p_~eFH{}c2GXJ4ebVQIcS*ORz@kl9+*9Ul+ih;IswIY554W<|kgBYGFF zjhnolsGcb+Mp>;_&k`q+vN@_h0r0ohL-(v9?x!a*&PLs@5??4=W^>M&=u9C*ie7#B zVb7a3vwE_2G+?Pa)J3O>KL#Jbb^_sD_e87b!5&8Qm9$;T8->M%9nV)j-&*r4N}7v) z0tETh*wgsP`N@{7&ctN1aOxKn&xX!mLl^a!hhNda5B-3D2riSgIbp5F%!yX&7@CvUtzPapI@#FJX^^#geW3onTrh z173SVHc~wn?lM)?f7CB6x0=~R0eUq)-eQT)?{X^sb?mMh02@R$D4xr1ye<(M>Y_gm zZc!e3(BKo-vjwDgmsQc_YmC>9F|o&FZs?QZG-C~?yN9UQ!&p|s z6-OKQ5T-Y;xnq5w#(nCEeCICG9sf~yVUbIRq!76ss3P7cKcE`8=^&ZW0kjp~cdJ*@ zQsBSNjjN=M?$_mL}0btXx$NLvMaa1mhd^!pR0Xn}l7f%s8OMPm!&{(0I0Qa)`;&w!e zzsC3Uv?r4GQnr8d@sMvE8lBSIJQa}Z11MgYL5_fiL{DTYZ=)&F*&_|_{2~p5jM`~* z864~jF&*H2xqtisBuP$&C_B1z+|_LzPx8jKR2a*~i<}C(hU3b9!j}qw$+B!NIwB!j z)PH+S9rJ{vwE6!Im7YtM)F{EIE%%jjYitK0nY)K1?cRK6&*S%m=u0O%Mh$!cC*gyN zX0Gk-rMK5y<(e=<{T;mOY2fB1+>eazDeD$Cwkx3GqjH~XfFB{Zf zqPUBkic@e&xDVUa(xSXDAh*QN5(JdFr1cPYrGl2au%nct=eX2>MrfRg8_#eOi8p0J zT2E;&DH{{AST|*TnUL9~luGm{%R^f!z^U`4>gb43yu;%Aac1zZ@j(3vg)T+f?UpTMjg-h0M0f z)BiDbk|;;I+*qGd>U+HhL^JV{f-oXCXnR%E)1fw1f7NI~DJ=Huy2=CnU;=e9*EkeNb$H*6;!Y%XvQX!o7v zFU?oZA>QQ_Ko=E86hVY{M!THC{Z+>s@5BvL4+;d05@Cvgjo;bTYr$uY-evjFWjLIf zRAhfRRuDm|FE!5y<7Mcj7{_-%DWD|VgP2Lg9X$2mJ3#!BqNWBIj1 zWdq}7CnKu4Vx~y<)_g-!KFemh&7kYCA;SDUD^WkC`>SNK28#QG^k_^>$amKxKO(i} zDlPpAXrC;5$rQoit13|)f3%|{OZ_I};mr7hwQH4*>9(7(PiZ**oL>Quch1sbI%vwT zvd(49dhpJ(v7ID&n9vmfG59#~b6jYZ7nR|FNDkdc$eSPwLizMr6?U`Gb#AOHcZi3^ z>d_?pwz?Z|fQ^UaO{QtV!J61(K(Hp6u9{TLq@KD%YT$F67;?AFO}=6SC2`@71&UCaCNUY$#s_4LSSOpE^DhPCAd%cdyxi(zGont11zG1(_Ae 
zf37Yn0^XPf57{$EHfslShNo`G3+%*S1eTgvSxO-h+%N0j+#GsYUm3sfqcLoagK0gO z))cFV;prXSNll7IA6jFEN_u7kacQC;p9$`iB>6^xj^?_7U(!50v;)V~1or4kyihJ% z&JTz2RiFe_G|I@V;J3GPKcE9}u$ zI>t3|fH>o`*Kr#fjDk}5kqC0g*bN9YqP;QkA*FHV{==c`2Q^jar|0jG&$kzjW^k0$>XNns}R zLu>Ax1*}(3C|DvOlB>wM4zkk3XWDw;HU9T-uDK2YsxpaHz1JnvSQl}yRuQq`iWEO- zGBTW;P2iI~k9*lvWT!AMyZ$n*&ZiJB?vYZ%fuJ|h8n7PGA3O%#l~1$h;1$~+S*o2P z4gMDU^{zX#ZBijH+Yq8`+Nw_Z*j!~_yda?9)daI40h(d)Zv-ip9jh{?FTBE|K0j9L zPo14`vTLmjjkfbUX~>oBXAl4e#*8s(18x;ATx?rShqu|ozCuKg=77{L#ufelPOl{? za5XvD*szr{G7ovSJTNuZZc0Aw99`fqTjJSPp};6TGB*r60o zPAB|1O4>1Qg(s$GESWc>woc>nu;Qedy9a zvz^Gh>jb5ffm0;v%B!I>iZsXc_bhzk8VTDxGhT$PVg76lUIa^ zl=G-&hE+Dt4_(hQbi6lyfKfCM{!Lg4*YDx-@>IP6;GpKZmHsCPjsGs9>SlEtm8;23 z#Coch>JT}B_vdd8zYf`R!g=(mC`cGsH!~1>VuY{w@SOBGR>X8Ydgl9@j z)bZa*EddK`Ye^PTX|7%)im4`Emfc#ev^-4BBWKwWK&A*N$Og{g7h(BoM-cu#>}T$D z=M8^-hfRp_CM*Ad>ugU;X=8rDvZ@#%N|jG6eSkk=`B;L}$j9PED2avEke6AqVS?R< zk2_QMd-8H!Wn;5#1;Y;(Z6ZRAmJ50{U)+9j|MYq&)#dT3<9^7hF3^zsgg~*vyx!R( z94N^>G4KkQTQTSu89b+c12dh`0_0Y=91-fZ)m_j($4aRQ3x{4>)^ z(gD^YRfw?^-iZ!q28T-w$J z3T}i`65xM7sYbOaIiwYAl=LV*die`b9<%WWoUYAZ_OU8oPUJJR++Srk$pRSCU3E9s z&2}N26*Tr$N2(G>HxX(u<@ty5kWCamFh4V-ZKx`ZU08Ll8Ts0HIv>+5JCDh)3>SdM zhg_JDL2ZOfYMO4?y|w#ZF#PzG0D82i%1_1|T{B5!Ib{zc@zl~{P_E}$Y0p|*B;utu ziZcv?WGS=1S<+y#V0&B`@1B!(KvGBPc^3MEgM2&lLGsYvKmN(xP$aXpZvCt)P+!fc zS!Xps)~&pEP;_Nk0OfgFj5D6)(to~DU=8ZZuQQqVj9y5+-8m)!9++!{UH7Nn?HPJB z*R{!8uW8UQKZ_urp03+8B{GfiO|2eH$;DfYEc8aHI_!sl^Fie>Jman&8qtBHkGuk~ zl*)SC@Ym?|PSO%xH)@%>%kwT$3hK-G_gW82nD|0cehM;)o%rlnL+p5F8?>Bs1*obE zh1wjF1!-WLFf{!-AS%2Zn$Qu?srt}9FoM%Q1-|*=<9a_YYSaY5_He@Qk#Vkjld-9Gie>O7 zdD}|V@T2}$vC&|7Tsk)t^08{B*#!3j+l0!&z!`U$nGN~u!^a1acX?0UT3FlW6iM6G zdfrxtS*1Wc3|u2|W@C;O3E#SI5|^jl7%vJd`7yqyWr5`4UbCfNCKfl!B8Y-Jb1bi` zCd7DRC6Cm20^-DLr~1gZqewI&6k07FLS%FSx^dmWW~r7>MchVb6I-XK}U@$Bx; z0lue_!~=^5t!9%vN6o$9M6?Dy6LK5O$o#xX{`S1Tullg&9Aj_LXTr*T(q6&#D(guO z?|e!%1~Df$haZWVXZmi99Es)IbB6yedkJ5PY->WGQ*#ieris_Ri+4Yf zMNTqthkj12Gm{A$Tx7b074Nwek2+CN*X?PzgLrrvLe=teg_$ERMuO&;!q|x0k01lB zTG!uwnM?Itv=%jRFFje|0)xVGLV4@&HzCdTriXw6M?V?~C#gY{R$?GAiM*|-Gj6gjig=%2FL zE#jst9tL7aI!%W-4U&5Fi?b@Q3Y+n3!Aaj<+9jz(Wnya4dN$4Tjl~;w`1-q~fsZHG zS_aPEOyYrPr4>k|5aQOy;gUQxKVQ)|jMnqjUpWpT63yu1cDk;TphTSJq$7ZL>a8*vYmgR@UKt7V^yqf2s7v^vL?{d_qI?pE1kYXZ}QvY|NomCx+dRhPTc2JGb7Zf(59?^JkY4QTPoHLgr zS?tF}Gj6Uhs&VV%VAN{>r9Yik*#O=x-$KUJQq{^Oiq73Q{$HyR=hBZ_kQZ89slSr? 
zXX9*!zP;d)z@oIDPRhVabS-9aTD7SbBNK;p#i@=dyjy8_#g)y=sJgEk)Q-9MJN){> z?Nb($=3nWAenewrmdLF!F@51s$`_|SLFX$_tUz9hi0j$N%^jzb<2$xKx7LFz_>(WL z0IG?kNK~5~;uB!#&^jp-z`lVD&L3F+v!m_b)hj(>T()VRE}vqc_cBXtKWAyR6#C+g z!frT*u@=axx4*QQEKDjO#*Xv{n29pQ7NL3@7w=&iR*SZO-jC2ql|UTYw|~cL$b?!Zx%w3{C6dOAWQZE0-wrA&y5Uy=8qbXw@N z&ZN6 z@^!v7WFPl%y8j)7|CX$RSr6MHr;#?`3Xtb0JMz368>R%W5kFxX-4ab9k=ge##)!QO z^c@pI7DNE*;QT?;l_@z6^ZN)!ASH`#s>dx||1;>A^%8`Ov3rPvu5O#{ZMWkV!-(y|T**Ka%Tu!gB|KR!v znO{Nk2KSLEk4|$v8(L4rq`b6w>>CXzN0UpEzt_4P#J9R7p$~yann$YFs$Z7Ll4>T>OnP@tN)96yIU{=5is3Gf_k3@wEOhqz_6vzo z^r%ELQUqSpqhfi;)X!*NaT!M{1VIU3NL7QWb2wA8ags@GBU{m&^Lx#&C~CRhPYj=m zJ}G5x@-=|5LLmHIiypStwt!#GQkKqgXJS)-Y~RS;P~et4zcVVPqNjV;{n6!;x=Rt`cyB9vS2T1=K0o7UY135|3%TR`lQThRRH z0%gzhyB(~@;vFJu`}7v|NZLl(0S{j&Ovrl_*E$#bfTp9^Aguioh%}Nk zd>;w?7XHK0oUQsGUPEy}fY`RVZDIcT3Xoqm7A2enn>&?p7>TH_OKEwHnrp2fgg2AP zKxsr;JJFqUr?uIoi$nD}uyyO`o6CKa0@sv3XPLYUm6!M(b+SDkbI9t}{|XZgPLTYO z{%-nM^H}ps*U@i+VDXHMA$X*6p4g{5?~1^__-fFtPv__qkq z1du}@5Hbik85ubxIq8oYN=^=?rlF#urlO*O(frQ^qoJdR(bLgfyT-(H?HU&+Cnp#0 z|GobE5+DTy1uYFN8w|$A!OXzS@qcgr|9$eW6Tkok-G?ZEfeZi;0}#vr{MQTM0sufD zFc1j%|1Ka13?U<@fRgSSzyLrn7z8FG2a_fbrhtHfq>q9j*U6+9kw)a#`0RZtq;anb zYM3;-_>CV<95UbbTRnmb2+BmHXcB5!WKA6WyC+!#B2%^M)=1yS3H*Pr|2y3O8I5#P znt?P85D-Ed5ez1cM4A)uzrh%L0YLM=6Jh|U z12+0)*FtIZL~@33H8KLgbY}OcCaPWpC|s#L&TBL#cF z+&_Q-<0csc1w37~?UnJ;#m%zZh^0prz2z?<^qyx(-li0sbHD-~13_ru_dh2mYA@oe z);9>Twr9+QZ^vX#CTl8E8%63qqIfv`0>ciW?qIL<2KEMHwYgz<)c*E;+!_Z-`g`G z(&!x_cmN7Z$oVr4$EKmN-QWqx3oUZ>5dcnv#|izd9D?R^P)X)qf9!*skr^39$x685 zen2fV&ABd)@^5mS%j8P>&jj{UT`VnYtJ9bSVi>`jrE4ws`1_y4mk#mmd|mBR3i)YX zds``<;L%Y@_sisT-K1jdI8|av1<|$3ychou;F~g0rbZM0DT3t|)8qcP!?wncs*JT7 zmGV%A6N#z27Ejr4PP-;MllixqsRoB@Qf1#_yGKD+AipD1z=G!8hJ?Y71Mo&ds=X#5wr*kOj-#KV?z`Gp){nm zq6J_-!C_GP836Vd90m}DP%@ETLTC}M=$_z7PpBDjcpV5WNCZMbAjhV0mr&mM#uy

aT7FQPcPCfl*ICvLtCAgVQBsl1U$0oCq`tOSha~TT2U0O$RlUbWaw6k zekc|{G|8;_emjz}@?JDCF5uqo*+~=QirW0})OOw&?>S54_$`m%V^3zsaVUfeO@@vo zi$N5^B02VgG53!?xJ`vTkC!;f4e4S@iOOAZuiIaByCk&lSRI~BBZObKQ-&i@BP(3El#~GFggls z)?P~CJZi2PnQh8{UfVRs}}5f zru4M*shD)G_N&_?{xy63;-&C(PFW@cBl;AHZV_RT|MnG1-$F*RNl+RHLh^51<7^1g z5*+;YkrtjWE5=CBR1kQ@L<^8RMnma$Vb?}nfWSe7D30_&0Xj640Rev+LkKBnn@7&m zUIyOTn!17spCWdgy~=OusTjvL^n(Z4k%gk=cHYB;$~VWx<-$%-T;?wVmUOIWA!&@# zF>#G3B?o&2Bp=O~;48SwZ82R_1#v!ou2^zyG^$%;s8^AY0mpVjTXZltHK>+b@TDyg zv!3aY{&Tn9^1lkEu%|=udL{CfF-=-Y`=KFMn{BFf8Ko1ddGNV+iT`bEM&rDh7RlW( zF(YvQufnR^apv#mD>%2|ibHwAeO#vR4w9|}hb_+f@MmZ!&$&v z)qcaOkHcbc1@70jG0EwynmK$4LqH%DiXeGdUPlT65O{|4L!byh4A=?gWC~@?q8J2{ z9HQRyIsOU6D#`Z(sK~MNAcWXEp{@#tJ0Q{5-6iXzM~$cQ{6ry-{-!58;(% zmJ5v~kYP|?yF?4=D*pili>wIGeuRcSb*faYN!`vH{RbGhJ$@_bmXLmVWfmD38ZHQA zL&O3>B2ovC`e^3$zcVj>M;lX^_GATzOT;bqhuq>ki06v}-T2Q);V7mMI7|}8cpbn* zp9QfeV`bLlRLGZYd$jWOsYSeN#mucB^015wwm<&>^1^`#BcDNrU7yCamqDx9Co0MC z9_cU}T_eWw72yC6ZJ2J(O*YtS%~q@z-S;%D&{_j%-zG>Ld~Fy1l55QrChdbeOfvz3 zu7g&$aV!K@Dl#Y|i0Vd!22HDb&pja9R`cfbl@7YXn&yIl*<|3gQP%960l%%SPlWDE z*Z%bAql&eSVfzQbfPjGif+YxIr2SM6Ofo=?R=_+uJQ+$K3k3L}VORv2h#LeAh0tgK z@K$&j5MT;K_@zPDMbNc*up?@td#wGLg1al*_eS*In z#}Vq=s>(t1Q=OEQr%1t_=dpQ&cSsT|Qx%7XSYvXA$T?Q>nYE&&AjFa(`>8aVn!*#B z03;oqAC`6z3g7w>1)cQq_=z-vh`vZ8-^TM0r7J6ZKG3F)_>Xb-zb)R_uiB^jqoMd` z=y>k;l67`GZmag-&NdctR0~c4;8oF#*fffA5kvt(13*`fL%=G@pTH@y@ZDGhTo5e+ z+`vTxe4`+wqkx9ez^MRmcccao;m8cYBJ@BZa2EyyT?~W0D`2tyvqW8~m(-k=Rr-kGvnI$f_P^8Kq+Cwssa9czdDHGVK73a$6*wTH}jO*D8)ze$uZ{P^rdKAfc&#Q z@%|(6_5GZDtuvp0fcLkHXlGlVkPy_2BZL-!3&fDN)dvSBY^2zsUl!9XdK)H0)fM>aqi)WlHS!>omN~XTP;WHEr)^acf6Eb zmu!piurXDevlwxqyx91E)q>%SX#0SqWC&TC+sJ} zrwd34LYh-{shpjl@6|;)`ws#-*X$@osqaG7q$pC+d~=LXb;Q2d3+zQ8l7kKIC?K6% zma#vVK`s)VUWPJ^GTbWsLLoP5lwNz=lZWX3)ve)(_w{%qVeUY?raq~UwN0vFcVVo!(m4f<3b8Xv_Kdxjgm>j1C|XUVdMw2 zpRu*P0sylpG>==PBPfjwj)-DJv`y3eXZnC=C;(1_To8;=mw`qx#FzA-Ih}wa^a21x z__#tEh@1k${Z^&?&uTgZnrx!F}J%L*Fx_{@Il2*D`Zj#a_b7yWj{Zr~?o_pB|wH3mL z2n`U74~x06f+nQ~WUOktkpoxdN#~+dL-sx-RHe#&HLUJ!tV^1w7IezD!VM$xe%I1ax_&J` z=)T|@R}e);7unaT0%QaC8Yy`A)*K{ud~h;~-AyDOTRV+P>wce3!z}c__o=r>UUx%A z_{al6ffU@Gw3IbY%6=0oT&d5W$&$s_g`Qkv0MNBFE$k#OE3H;>-2{$GQf!$tJ8B

_B1QY ze4gm)$}gHrED6rKcg#7I?S6MBi?#$%M60{{MY^Ck(K+~60GB5l_ai2m&&wv|ZVdcA zH47R`We${`j$8`dpWhgNb*AdfR!KCa+$HN#hS$ES?54g4E6#T3I*{-2FSSuByuM}K zwv_35uB1lTlnrm>W&U`Yr~wXuYLF}9pAs2GFjPPU+yX%Yd3r4X(r$>7X^=uk)Lhs* z!ZuP1T9_pi0c1r|DdKgo@UR(L8AcZgJ&-zq11AFDgC>XPz&V6DJ|}!v0jB3L!}tlpPUU04G@p6O(mlIla(Xkx*aT zF_|yvfN}FK9ii63f4w#$pg8S8$+5{$2qjQu97>O&gHchm(6|r9d$RiOY1w;IZf@;f z2r1aq*G}%y98=HpW-V;J?_|p>O(8gA0fsTTEh!NhwZ+cO2j^BFwn5Y)i$_l_R3akP za_a#$r<@R0dBbOqR}_3Ad{i+DJ1NTy2X>(^plW-o54Wa2W_ZspBxbz&Ef=5r;!~yd zLT2OTvqxg>%jfiD+}W$rVT%b#rrFshELNvM_os-*otLAKh{rC!zTQ0dtX@{MJZm@* z!<6YK218z8*u4~WY#h>!JPSWJ-+H|1+=1u|y0%p{td;F-OuUqIXnE#$-Sbn&Qu6=`*&$FW| zUmA>LzND#dyVeI-lek(Rw8#pQX|fD)fz(nbiEntdtf%HCguB6XVyx10MGp)``t!^h zW0chn2a(o&jO04X2Wieb>6Qz)qOaA~nZH6WC2X8Z4q;kv#ZK9#j*XD>04(xWEqDSX zQh+8C;irP&uFyO#fa*>xg5*9;?9jZ9_hB+103J0DBAW3&B=`@) zg~McM?qDdH{I#a)w~|H$gV^}Uqkp~kZ z-lPa%_=IWaiNkAR{0h;9tfT1

b$M+_Cpi6*l7 zU>dt+xyo3xx5Tf5&kxo&O8Qo2@6jbPd<9vr*S_9y^t^fc==J#w1(Y{qUyN0*M0_MM z>Ko5lv6?W~r2PO1$qdWi^YN@wmcA^FNxU(};DO82w?6RvWFwb5cc~$|WU@{h(f8q7 z@9G(~vT4Ry+S2hBu&+z74Lj#}OeL|P{CC9qOPldme-Q=N8Qgl92c;y3s@zYjzMX8j znJ+F(WJ)Y%rx&k!1&*{2)Md*uhE*%StBo2p5_W|&o21?W0^`$jY8U;v#hsml=R{d_ z^T{OT{TtU*K6|+r=g(nwruie={4Fg35U6tg%WhALfO_+(LP&EWmL8C*3K|`et_mO@ zcL{Z^H+}j;DEJOy{imqMEB%#t(P25-0hO0u)CHcWeIEantJR!$Vp}j(4uzVQ`GU3}e|cc+7f(7-ZQXdSLHmpmp-=r;U)u z5{L3Ke`}{Bk9}aj@XZ>x@`(tGx)*|zl6^AEyTod!I)QpWhS@Ugv)}1XPheU=)%l$g z^VkojFA6sbHS;l4dK7u(W%y@K${-85v_Nl!KIrvub9+Q3CWL0PJr# zjE}^qfq>w7Dwini68=sYoDT-WZecgD@p#yC1}!)P_iHEK96jf+IPP&JC$cC9Z;WYY zFo=&~O%T@GT>uLq5?{SRg0P&#veAK<_%ucl_#aVn+Pd1b`-T#3H(o{)WAk*v0xh-P zaHTQ!3m}5J6T~DV6f+SE(rDB_z;=YB&MA^&j~E%g!J@ouWM-1brjoA3rE;fwmmp-S zEskc6kr}(%@#gf6o}%~I+&R7T=pSH{Gp5t_ldVd2afv=Sjbyhlh$tT#dZX&#lCQ0G z!6#SkYwqgAv2{qh>ay@M4+})&@Pj5uCKUcM%>?=)${2BuFxY0oKh=Q%pzKs)7DSk6 z-n9zDRCx-jg$9{Kxy|-FrggbD5A8E(X zKiFT8YU*259 zCQZqRu@F_)@Xf+9yzG4~n8=LTj|rBJ2TJ zL^2G^$(IFAfuo=<_P{g{=)cWFFj1s{NZc`$k^Fy%G&nym0b)c6P)UaJYQa%l?J#v2 z>OlZ3=vkK0Mcr?>Rwhyut;gU{3Pf3VE0}88Fv1ie&Rg-XlnMCo>&%RNObk$=@GsBN zT3o9DYe7)Z?(>h396}K=;WI(>S^Jr#R+ixj@&*|~L`;O3DIx$v9p1y6Bu$Xrl8k^4 zlBmcKkqY)&KDxgV#Hx8qZCO*zNrY0vyT6i6aqLgR8RkD+$X$}{e_S7Iv2;w9gvmh4 z0eS%ZOE!c@MAlJv>-6XkCSu8zEe)TLk~FKX>+EXNJF90~}KLka+o5hN4> zfd>zf<7amLxacL~rn-MUGya`*Ht6HaxyS1K$$8$YqYw*z+544@dG_p?mZ#ndzeJ}G z?}*=+xRL`8u4r3k(W(vi)rIDmYt20M=Gm!3Tv2gJLw(i+=b(va-M$CMckJpxJxoox zqS2}~)U4B+P7yypN&CDO09F@3RXcW(gP>2@TK#~#rV;A%2mW_;v!cAJ69`u~Xq{7i!@cw$aAxy|`gAJz^FOq%(D-->=S0 za+jX1|DzLHMu*QTQ&Uf@VT9AW85PfoV;W#rbCLYWw?jehqeqPXm~GoVxfCB+t5vgg zRWsh(frqTe82qmO6WFhC;{N-Zz&oeb=$-Vw`k(CG4td{QYA5iYT~x+orBA;wtr$(! z6jT8`Id9QEeM?zZX!I_|0xe^LcuM3!lYyeh83JIp1ZX^4z$v811elaXAK?C@d!Q^Z zGScybQ<1dH6h_tkjPrcDkKe zYk-tI`CLbt;hYeQNhpn=Ur3nalY@wsS7Rwu^ViNWHjL7>73^H9%rTM8<+024kQzZbr|7v^#f~?(D#(e7zErNgaLvO#ej(NP7QkLvkVe9 zX3e{-?$n<#^<^?NSh82+P#r~cr6h`vGFINhA}%iAF05VP>j3N}X$^2#LW3oKHGu8t zk$FYj)X5k0hh3?v8%Hx-Pq$$$Z4z;J>bbwZaYtSiUeclK-1@`ui&JzoggJMpVE-Fs zy<H^ZnNdNk2M7^9LfV6I0>~kvmU9%`DQH`!(se0M!`YIJ_dHCul)=P z-$wXq6>eNxmE;#rnG$nV#C6pvL>meF_(?1AxmsCHp6A((O+eg`Y>%lO1m()D@AnOK zZjM*R?zX>wb!uE*+KlDiaCTzGS*!~{RSKzOnPNu(2UIknSqX66FDa2!g<1&4*kY__ zEnmt3?7~%%Po>N znldFGsT#fazXLHbGMU;R3xfU5uAG@yq$DaNZ~)jfC4AH zhPghYi~8qX5StcCX+g*`Br#U>jMB>pmc)S3FSw>P4=9+^CP5_^b1hcUz<=2>HO2DVBWvsc5t@fW*>K?fN6Fe28s( z_-yI<-3Yqn`>}(ZqVblcf*3;`^&@wOKlTAV|C={;{oNQ%W^Lj%j2VE&xQWS$jz_a7j+ikth)TCrU<_tr8GRokt}hoEvQ5d@rkn;qeILP`f2NeG0T zD$=S7_v+66^pBTKuL=D>@*TYHevNKz=GA9M~^9o%lry z{|`WYM;;8<%7?RNEB%P9dFTJ;=4$dBB*}1= zSINAudXA8gFT^BZp=6fMehJe zf%{X5aa3TbmE9zl?idMgWt$253mkLniIKN1Or4=0PM2>3VuYUwT-Wts@5f;H-}1%u zy{Cgm)(p|PKX5Wau0~L@pIUW|31@$=w?qoCeDD{WNeDUiUC0#%-Sd96)zZh&7Zc3s zB-(F!=Xcgqed?b4rrq~y1MkQ(9wOkui2f8ZD6b5akv;&sNz%<2NJym-G0u(%he2t_ zZyu9$9lQ%r1VRapL5XmN8_|L{B1G5_+9W*&L%t0GzCpl2-~hm40DvU9;rC*O)a72I zwf3XP6xL92HXnBJ4N-?t8l2@qd`y%*(CafsR_%V-P%nJuJmj{BAe8T`-LV;&o|B%? 
z6^ZNct*v2htJ2ogiNb6m;`=iMU{KIXLP{>K=uWU6Qp5LOu; z53g=&iR1C!zln-=_#=^@vOco2xAS!Skas4Sw`FcRQAJ&GcC$n8gAQRgU4ip&hvtl1o7Xq}0=mMxjO6{U)_#F~ z%Iz%u(Fq~&_ja(h=8Yfm-uFiV2ehM`ev3tp#;(4Ma1Rr=v0@!uZr@OEWw}N{mz8#4 zL+kQw0Y5Y8ky6d)U^DzcDI&!`?_JD$t7PG4rVwuZNiEN@00zzhat(^UV;%5X%*� zgT5Cc<>iYqUXJN9tq)r!a~!YjW?9_-S-F$!)VxKUS+-&amkdaunkD7836Z*AvLRsw zWU_3W>)`*iUDHvzHVhkw< z3P}kAltW|)60?_hm^6~N3b}_5SPPpr9Yq`D9F}9fh}lLWL9p0F2BXLK(a2Jfo88cB zLdM`7zV=0j^91Kl6CNLh?p8}Cq88^Gw zh@e#*BCMYrTU>K)(V!~-lunGz@gb2pC#Ljw)Q>`KNvG=Qe`MG*>!dCjx=h=>aFxf4 zK`d`%K;$?^q7I4Vj%aD+>(2?j$9s6$lR?5$Djj*FsRv&Tau4e}ToIf4SBTo9jCssRGTESU#N3OHEs4wE) zJzcq(0~( zUl?O){cRe%C@2Yt*ef*eID*6} z(t=2!B2fooq=4X~)8QZ^Eqh2B#YdnB6B8{sRfL8_-h#leSQu|PU5|9qhyWc7HHX+q z?Il$ErZYPPotmcPlnYHd9K>*UF!9wT%wXvH6_5NA zQXH7!3nmmk9WXngY~6MCR9~G*c1|eJ8%VZh0)|?rZikLBz&k77eO=a z7?GA50Aw@UcT7NSSg9yH3%CahF%Xc*343j0rl?Syoj0}7?z;vA3xY7$MM(AqFRvbW zt9)0klfpyXuuY<3*%@LBnm6Y`Uj_CLVt&n@$@%?{g7!Y`b> zwd_)W8?3CTpKMF4o(auQDhfDI-9^yBXve!&oN+IVOBMp=`_1$0fYa$yY<-w#9+;xD zLCEdndb@m+mX(~dT(6ne%>;T@G@M>tW}+dW)zfPim(C;D@5-h!{r+>It9R9cHEvmr zyP3zfg2k*c`yfX7@@H5&e{wWk(c?wWqU>20ubnSGbIH05Hy#{Qzejq2m!bhce6 zw84{Z(E2k!iF@AlBL+e;zw9c9H`{|fTvP5(K3=_c z{6OPL*zmp}nh8`8J}yHELfRms%yWO=CAsj450w2~zMH3K0UrXwF0Tjk7WPl1RA?D# zfVAL1#04D2mIlE~W7BAO${~?FU+?Zjs=x@{U?=T~NJ zCgf?0`F(X*Gr~r{hsN)WJ~;DkH0y01skqlF@rqxata>}kylD6GXSCmNhH5qY8Xsk3 zb9#1p&5FxaWuZk^dU@MV|GL}@3B`vyVg(#Cax%}`lKsD>kJgVB27WPd@9M{-2Ml^H zTHO1AlUA(q%vBVBW>F>}8Qgqo;X1bTJw02i5uhsU*T643va-F9*Myd8GZfh99k3Cy zyc_3K{F~>qXd5dG+?KT6a2bEr_xH=($1CY?fBE?BbFb{xjg%c^bJM3!`x52VN})YX zyzIccLA9>_7MQx|krz)yrZ{q|f~;AcUe#QK@S4U}#!fGD-SHhknH^d$_^GI!!e@8$ zmlFBc;6oC5ybIsx_eDF|X(i2@;`T!nRU4Fager1?Sb3Azzd}2*6HgZ1uZQFZi*OO5 zK;WMUxH<;T$(JvKh=r-1Bj67LNP4vhHv`}&BKdX;DGf(-Y!{M>0~2FlC;vfXe}oh1 zf8FpXN;t&EXC9yX)ZCBiuhq8frK zY@xLgA~$nD5yD~kM^CA!mbZxd%gpu^Yk0f$3BDVlb!S#|*-00Kn`_cPanNNNegG!!|p5OkB8pj=ndwvjD(7YSQpl_M+$EE5VM#o7>j#o1@m= zLn|t!-FFK!qcup536ZRU?_;nH-4_6teC`Xr_vgPj^WZKi&LCDvu=+m>LWvZLf*t2b zCL~HKi;$s60Rkl>m8LX5zjb+h)+k#sF}Bavt@}t&?4A!i z32knBv#rf0i2IjUnDos>f8)|J^!c8ov9 zxu+MjnL#dh&|+|x?dDnYzL-gCp}W&#y#)7`_(F{Y4yMla5L(!`7r8@AJBRr+f2&P* zExjEB-KH0=92&B6fBLDrEm|ft6ny+7S<~j!;rIUApwy8-VCI9YPpT0UQ1S|i1H&KY zZ7pRdS#i8oDQ77CGxxd6hRsVSQLOihZ|m+ih2n36&kW?xY=^?LWN|a6S+qW!_0#Sh z9zMFm=@$kA>4wf7pIkEw9m2H|5*kL96)rHASC;uDgeE)X>*g$%D^2!jsGa=GSMWc; z%sSQ6Ds}y;wln)O^5)Osr7bOXnZ@CH(Zl!t0buUjKMkkzCHFsMC z077bJ#B;*Izqi6QJ~XDb$h_U(%zHzmRen$!SZXaz{l+Xmc!h$}J?qlI$KMcLy1lsb z%q*-moPS}g&R0L@$-bzk__>>Og*CK4#^>fgfOp2~+nHXsjz!fRbV+D}?ySMU*8=Ur zm!u*JnQ+s;Uxc2S$uKD!m7l$S(>`9W^aU$WlQH|#!dti3eo5Rr`25acwDfA`?~2<^ z)6VzLo;5r+%$j}TNtZUSQT3SqAE0KBVk2c$O|E5d+#=|fl{=zp!7A3)3?okT|K52X zZhB>L#255s*d;vIH86|zZ(Ws5>$3$n<{NV1H+;k{KlD1+=$<*xJ)#sk`k}I1W|R64 z@cY`&@-xYU_PjIMHi=S$qqh(i^&`@()dHa=Kix(ZCT-5>H9ULr+#gV=qCFe z%n!F1c@}l0FwnG@JHvu=SfI7UZSqZ`Axec4sXI%LoZD)S=r^|)$C<1)BL6cqc+~SNf%7XM5K1%oc~`5c-2nv z>8Ed%<$q^I9~k)-mQbPM*N$e^mOK4ic54*WGi=ZKT+C+!sS+iJ(w%zc?OKET6qn>} zN*%+q%!wSo`+oTZQ>dG+ZEh?dUUFG{lA88zMb2?cz3!^-5kD|IcYJ7AD%;aC^u2$~ z;Zuc4py*gJT65=pL(eBRoAsUb*IVN#8KXh)1`ZygL0@|Vufqi=L1#)JFG-&h-olEo zpuyW{MN+*FAAw}3GUQz#lner^#YuV>PYB$4oQzZ?&5c&U!x$sqbqexkQ#djRLj?_e zidi$01R*;c*L5OHwKAp75Gyb<4X7fhZ<~jI5aKu_YG=?sS+h^8Wb@YYtG1&*R*f-t_FoLr%}|P}pt* znc}hC;+DC!(azgdsrZZP?E5EU9`akWb&rwFvWU_|B)mz;*C$f)E$g|2PbYVr!T3_0=s{yc}7$#oLyG_Ely%S%Pw(yB2+Sb z*B$E-!zCUrry~sa1yJ^z?tLBh<`r5uDh+ztuupE<-pI<7V?Ta3=pTTR+Tz$^N3#2Y z$H;BO)XmV)`HZ>7Vo}rTfpDr9F>-}Bn)k(hKe27K_#9ienT1HEetJUTpnTtb4qf2) zWzh$)-t;jfL^z|6*WfteiX#od^G7@6)Vh!RbE$TB+;=k~+vklz+zH8dOo!LHrnx_s z^Q<7634ujoJpKXVF34tiZdNEd@!xrHg_or=cw-=Fo{@k7)m5 
zRq=-0xqgMW0N5-w`Vr_!44uVr#K->E$~M#uW=+%T45dp{pZD?tc($?v1{K;= zP@zq4&~A(0s^^k(;;%qly(Q^<*`3-tn=3A?tSoLb->@8fOPmEv)RhQ+^%E{SPOtQB z;4xXuTmFsk+-YnJ*H_EV8j4tW_2TshHR0d$$F?16H=kPH-l&_(Y~81PPgr>UA*rU# zm2o}PxLsI2|2dxCy`4+ryHEe&H$~|`txDmA`yZTgy&l}(4Pv&Ot{Qx4iz`kO^5 z=7nlDHVQ{a-Xq@os`5W3?ul&r*i-+_X$E><~GrKI6tqgdT}Om+uDK;q1XE z0Zoz^M(4tXhuuK&YJtGdNOc?}P7v`Gt_3HR>5!_^qcl+h3w(Q{Txf0sOO%eNr4Vm{SMK5bhbXyUp?mieV}$PEBQFSI zUeJq@8ISbzm-6Cyv4pzFmPvu-ah)9aOZH_5bQd4l0->#u#Jw+vJI@lJ>UI&0KaMZ~ zxjB30F2A)V1HQX9xxTQ*s4#si${iyFO}Q&;&Q*_}1tuK#&`^r)4()JL0Yrsys2*>VtX>A9zJdzH`k5r?N%zn&yH z2Mj%!Q}P$ReY3G>icNu3p|R<9nWfL}Q);mmbvAA>CLQhR`a9mAHad~ zcluP^=)Gg7-|pp;HyU0uVYd{LdWRY>+KqST9$eOj@dgo`&h2gya#&7qTsql%D{M%Rl4-_vwR5@hq0Yn!@ zawZ^E1gTmYkARO(82&L{dDG+G+Whz*U~tuRjf%~R?+&wS1|NTWr@e7vE&N*CwY_p@I#8UfY1)$8cz$(i zl#bO_tb8@+uxl^6jtdbl}$Xw`GE-p%EVC0jvrj1%~JfLz_Qde?LIzHx)+`=U#0 z2p#*W#nery5~+oey1DD^>vCO(=~GDyPCMlddmpRQ?S`rC^xHw}hB3o0{o`Kx?&?~M z9%PQ79y2Wrp+*P5K&SW9XRdqire&>WQ*Y5YD8U4J1Icc8FdE*vGroOaZX*5Q7kA)& zU)D$=E;*HVbqQG#OFUHtR*MgITN0E!-92|yw~A;|1pOPnu)f;ZIPgDNejJ|j2(O(0 z92+5Z96XJIpeYf0U?_w_fFcV#2pnYMqi+E+G1OjnVj71sk~%iH@J8``nHL0B0$>k! ziNE6rf$Ojxlaw|n%`pPP$ceW;Lkyq;|7I7S*!_9M;7e7G8FfO2%ms+v)Q{2%(7Kjx z=Ucss((6O1vC%55QvZIuf-zc@vjY|!On7`&|2+(c)Uck!t-4H&cS zWR^&F5uuX;W$>q1eZ1w?RY~nisnd_0X+|>XuTK;lnTJJo;%?6;yf2(f z5Yl3`dJ$%=Yo>7gKzun!N%w}E-jSh)S@_x8B1_3f2RjK0U-&PoLyx~Fl{QQ`xL8SN z2e~6IApRXQD(~K~ZxtH&8xg0f(W4gL788fSAd;K8P?f)QeD-J@*C01GD7VjSgK^gp z_OH!$lL;)R-w)MJdh$ScII22F{#^0@Spj54T;*Y-9Q@s>y(UrGf_1qK>S#MC4 zM=MpOryJSMwO>s?HRolepQScF*n2H})J)WMmv-(X)rNhqToI>qZuI2>~ z_pzwMUY)PJg<@9j{_WSXZ<92FUyN7hb zRk%%oTSB-s-c?1q&4gY*5K=6Ut>Dn-xt%1#Mku(w+)r;xD;s!vHvRtXGvAN*tc-{j z7%x%Pl3ZzFM(Uy`LTVBugql$$bCo}FqrH*Cw1J1(-HvUd`Czd%=ShC~H^0XHHpe8} zsU0dAj!wJVk8m@tSzR+_wRLXBWqB$-x%kSxkg8IrgbX(cTg{yBapqRC*hCJza(m(J zi_9IDpEi=iS4}f_)ojSq4T_zm8X_~<#y|Vgv1$gn^4!h))5P9zy8AUlny^HyXlWsb zQcSo=Ir|+VUHE77KQc}~tNniO)vhpQp}Xrs1NPdnzR--?a>0R=w4?H=si{e0@HE?5 z>ymq(mLHAS{ol^F?&-&wjBRwwGjf*kU)tbHoIlfQ2 zN8;j-SI+Lcy}nm!P_bWLkqz1a1_bNfg~#H89G)GPHMv;72uItA^{}Q7C@>WAT?x( zAec0^(x}L`i7pUwG?XL`uq}hpTz_V`gWs5VkG#PkA|mcB>)ncXax(T>X4ZQtWU953 zgHIjZ78T~i<1YCK@<=s{c?jLMke~2wL{|0_e4ONCw54O0(L;R&Q_<3E>VZYNFLq;c zaCtiT+!g}dfC*?dv!w%m>GQgv*`Ng&a!SPJ8RTs_YRvWm2Gn$B(>~BxSraW zAN0kjn(No`kF}SZSKO^L*LPhD^x{Ayt^2^5H*NUq#>JnJ5#}uI2`ekPw|75dF^MeF z-;b^-H_96q4!1ri$-T3{`e;Wc*rlVY=jsNI*EGXoXUCL&@0X)zdO+{l$EHxtINdy3 zU5Ai6H$qKE_iePtNIHkvDv$BZY~-^4#!qfkowvxOJ6LnRTatz$Np@^F)Nc9lA20u% zHp#%~l1;a=Hq+U%u3?UKZpfY87N4=D&5>vBfi_`7n{f*E1%r|s@F9#e|vd) zv)t;)pF9@nCjc$bH^yg@`R0Z^&8(5Um>LVCRO{T3ERN zsw|2UCGla)ReM{{7xf<1E&hJ;+-8~%>A==7VO1BU9P?{z-jhxKQ*2!a3tzp`O!1vb zr{(uo4Xu$cIs0YHwOpjr2buQ`&!5|l`^`S}2sg1hpUb|j_pBFv6Wdf%9)E{iJV3nbgTax+tihDkOZ^}@4A8* zr_sSAv0wtlAQP#Fd5D4`jU0W8WQo7w*8v*f6jE#51*s(lMk3*|KzcwXJ`fg4JE4t+ zM*TlDon=^5?Hlcf9(qualtvnnZU&^JYiJNeB!=$p7&-(&au{kDN;(Ark&-T@lrBLU zDLwoBpL6)+C$3Abz4voJ_gd?>pvvHw8)gTT9?4)YG>a7b2F-**vQTPMs#?DSe29f; zM4AF{Od4E8s8_{PJZPvCvBpFSMQY(-Si10({iCTmbGgO6AAT81nD_`7$0-Ps)Okp* zP=>TLG1#u3wMP;PXXNZ0Npl$067iRqffOX|H5aR@@EP=*7n32L+JypE>kC7pjWdM< zxlf1OR~-dsB?xx|``N5^(>lv`%!17G2BQ{L>c!iahtl`LI_wfs`+3XacujY2LN%z` z4l#x$D&gz$AVM?GLZ?RWL99=E=?#v2p4pkS|3Ix`qWMrS7r1B-wW!y^yPfjC+p7oB zbUyco$6&K(_4skRexwQmtgkr|v$MH9^qQpu6yK7~`)J&?6@E#@ERc^fyzQS{a-DBt zgp;^v&2aaoXD$wk8^wt2k74L=RtK@yCKZ3r{8*k4UmUgCN$Ue)9^ip_1Nl4{r*QW! 
zQjck_1%e9;Yx9nRS3|A>v(Adj2PcU{*fztr(GV=$NG^4#P&kbID_7YV$TMpBKFZ>F zmyAlNH%xz3QrXEx{^p%y@i9a>w?6zB5=e{-fAIbD?V*0c#L7G67c60cfIG(wa?S`-; zS0$y`Yjr?MbU%JsB>lq3-nc=my3+r0xCQ-#m7X_bF6G;MmtOhhNB=;J6(Q2Ep2V!( zg?kHlhhUE%=&Mjo^+ZZ<>!pMjRho1Un9DYL{>4cvCsN!kmEhI3rz7jxQNhf+9m1_fqvbe1OX8X&$R~&!Iby%8Ac-u z256hQQU(=r-?d9#&*?Ao=^OTt^0njb<`zDMBQMK@r|e`}#vQpf#%*2p6n2fPkDJ2T z=ERNlYTDa+)*d>Wuv*e;&FCfnY4vDLjun?5eziFNw#wv+Ox71Hd9YXc#ADz>pEs*J zz{NN!QnO~y@~Iv7j0+u=mF?wQIb^Nh(bICc>zi|Z0)&SWS%B$qrHoLJ=a0{e`?FV+ zP2-0f#ViAw7>}Uf***=0Q_GiOOfOjT0q_IhJWNmqLDT?`I^ZF*qr`mjb?9+}|IAr_rryNiI}j^O%1fdsy%5k7^|lgV^N{0J}T2^XVaxE5Poij%yU zJSsv8A{7)KxiM^K@QP(yXHHLymB1B^K8kGRDxFgrAL4oT*MTF23QwDcC%QM2y2$5s zL{0__QYq9l_Js10C%AMIg6NB6x5IJ5Jwmg}B_v=XRZkUuhxVTDYyK*L=O*DTP?IyK z4IajMX21^Uu8`{;aJFB>d zCYIz6$NQ;&fFr}rTLQV8y>eY9)^FjkW*Uha7id&b$Uo3O(8%~*bB*e&19j~RevZQD zZW*0P>I*C1KNtk%pPJzlu4sL^Tn~EcqFn)RCQjRt5SeMQEy^Lmuljjs);hRBH^{M) zzK4%aV!(Un{iOz0M}WySWw&MkYN%JE0e0Hc!g>p zND=3lO1>|2w1~5!qWrcjbHnpbex#HjK^O2 zxVsi2-b+vtty5ao8IvkMk5ydR)r&P#lyDGpJdm7{YADaf>6!RgtE8>m*LQ@&_3*ut z;Pcmr4A16Lmzw;^xmQm&9G5b9p0?T2S@>$@I`|}qG%fz@?^D4Zs2Q4e{JF9~R(+;R zyJr71A=jxR&?uNP0GK1xmHx-41k z2gCgRb6@7j8fK4}w~#vvELB`SH}1l0{8*lG_mCNlBUYU=)_k^-e)i!LqekPQnr8wV z28;6VE*Q+jPQissz8i4{rG=&|a-*>3u&$t?h|>Rn4h;xGD0vS%{tzy1dAKg0A{QQx z9+n{oHcwO`B{G^+*9V#oy-*U2h>FG^LLz^O>0P6^SnYHAShbe%cxtzHQRNdpCgGG1 zolykuIrL1gxtMo=jT&wwreYvsj)K4k+B?{*WOU!b!#Xw#`v=OF=7LULC>ZP{N|?RL z)1*?m1eXRd1Hy$mh*>8`f#Ngc>s*FxiyFBBj@xpgiv88+hd)ev)5J8gUeMj<#zl7)iHgl6{R%BsKWn+U&Gm3z zq$Kf){5?m!w~Jjeh+I)IyVu^UZpG}ZB{_0v7)de?=-2KDb<(`r>Fx|_FT5-TFh6YA z7j~jaNp8L!9nEggwQ}?B=Mg5ZYNeL*<-3?k1qpuPbM+R{X5N;TkD5)9^e+Z~QEL~5 z?l3yunjWm4GJ|2a;b|$>hw=*;Ute6`?(cu|RXk;NFF>^& zWv+DG@*33E4W~Dr#IIE7iK})r#zCl%UzfiPbiYS8Vg}s}DLmAa^(E89|5U}iX{OWK z@iT0mCi`Yt&DK2qG)6rr3NZmI;Bey?@F+ClIiz=@`od|aT-ybQVyQQX+_W^S6o~f^ z^DigaqsD5~&dl$9&tUWPZcaL${kPADN|QU({pP%Dj~c%PA!W4Vapv6CVggf}@5yya z`aRsaTR}sdzv?;uSj{$dukPLS(|eU$h<&6zInHmoPVO_cZ`5uXwY2?4=*`%W(fBIa zyeVW1TOz9d>#0W869KV#4HHB{ruAw@jJ$uZS-TqWi*k| zA_wPi!0%dB=J_JfbydgfqzVeJ129k%M5iFdv1PCx|(xccT zxo~w64gee}e1r=e31nTkirBGPP~iw5y9J2+4~rT)aFLw(KqenWI%fc;!`7u$OX;y= z%42k(-~jcfKan*=ogJYoXNE>9H4%c5&hNiN3|AC1%3)O90X%3gJtOg72pw#{K@li8 z8shg(Pxp<^ik|mISma27M9eGh02zO?=ME#pgwKe;NTrZT=syq!2(o#vxLO&0a~og3wq zu`tIhbBpKit<955K0+Fg_v*;i*Rk{4ehD>NT<&!0V5UDzS1ov4(d-0AS2r`-{t`H? 
zS=nw4FnHS8<(2vgZ-(__eJw{Rt?7*D(5+%mNO_`(!PUV4PuO|@DHC8C`_|ABl85Bi zS<7K*;?^%^`(yUeVs#^%>Am^LyCzJEI5u03VB6uerJ!3i*9Q;LBbp`5SdC`>m#o(} zfh&i8v!Zm|^_knxKEIH++uyUz)(ci!eQNu~DHe!?D6CO^zEze#zG<7ln&`Ea*U{0s zl5-0_>72b^7^|DH+0As$BH+M2H^fpw=_2H~^r1YUh*M~o0$z(B>`-8LWvMfAJ!(K* zn<0OOmRxeQ+Pc6M!Fe|4uUGP^hVMPpxi$#2G6imvAH81M%@ z&soFEL-0CxO*AtO9Qx2T^DbF*%+0XGeg1m=_+9;k%Hm`B@4gk^Z{uD%F6Q{GaIKfq zWA$IX+=R8ea7^E8wBE!->(}VC`}apxgt68?a}BVWKCHRFjp4EF3Ob&&kJf?J&b4Yh z{&l)!iiYpQUh40y{(6WrL>jpentveu;!>;JbdG8-yQ4%+M!=V#KiA^2?N1m>22CvWge8a*i@kP)4ffJD$&&IU0+$&XwBJ*n^ym`&5Wi+{mvk^h|TjXz4Sjj z5W0M(6R*1t%(tc`0q+&a_k=Q1Rx7-s_Ow2zYl6_on^~dlM6A<(}aZf+fVAu?pXzr}aa~(7G>a}P3)%%{2IvMjp~luIi-`P0If8_glo~Dvc*H0`bge zN}TiL6vI_&12wt|GS^g8<+F4aUg61t+)m!!2}P;ZPYOCUKEC)A6H+U&#Dq^b6RqE5kqimSFT* z-*t&xq{P%uZ}X(5F5b@6cQJAClQ|-s09NY4?PT<;JZL`|PNJC;t;HYa*pxuIIR{64 z!|AA6(yLg!7pr2IOcp+ROYp0??65lapaYKJSms!w#bwIyt#E>gev}~t9m#Bha;41@ zyBEIeq0LU>dnrcesO#%0m$X<0M(Hbvg(pR5VwwXymWLdgFc0qmmqQ=bmZFXwQ z9v>Sj|Nh*@`E5vZN^2e{^-{8}xMTDe9XGJ(MtuUpF;E+9n*dC!tPEtYAR$wV zSXdp{5D_AXpd<=NZgAl#5<C0D({Bxh^DvhryQcgO@Jflc>iq@=R1D z1sM5@Y|ZZa1B^ll;Y^D&rfelf#IqP=^8mG%WB8&BvdYe=#sg5G*r`6k5>7s&YOMQM z1)$eH3fK_kIA(2#IYLjXdk@Cb5w2d(EkZi?NQIJ!jf;YIPz0EW)x-d^Pg*1?D-k*J zKJ8NbK;ze_fYS`+k?6x8bL&@8;8^jktoc_Ph1%=o`7^mGX_DL?&xS2q-YeF;sGibG z{$2_;VDK{=okcp?qQ z=ND`LyyFTll$bxQNf!JxNti*6k-11Xd@@|;RVk=1%3$@EK_g}B&H7#nEYaij{_BqS z3>IHLRa=;6ySN;#!5J6Gs6{j(jYcYsYBEL^Vcl1q+w0w_xbsh$#FxkMd_BdQ$Nh*E z*iTI+GHegK+}i`+G-{wtKkv8~rgddkj@+co#0Gz;dNzy6)mi1JKwJI^=dMDf2R&WCd=hwn(G;7t8!-SABS0jEm| zNKSaqiSQ#R6~I#3eO6M175Ynwy`~xTcUs>a*ALuRr2~C3-AA@wKC34slQDnAVn;iF zu0s;_wt-hD=bd{#!$=9;{z4~mXLb}HswVThxqI4i_s-iU*&|&?jxT_JW3Z{C5MD4{ z*J--w3+rx%@9UNyJs?Ird`Rp^Dk{nLvF2A@^P^l|QM#LtA6d;9ZWws!1EKVmzVI6;IeePo~6Y~7OFA8vh1_?+4T#b6P zi&nz$DuU=1YSF2Oq#@@PI}mei5(Q=^Ng{353xoA%0PeaxSI|#J z$^0`Pj}kI!67LczbLXB*lH3-$o=?nJROtEHM%erB#k%m=the!eB%2C~@(( zaH4g6mGCJPna`n9s_cr<2J%YS*iZCGkPtR5LR|vDHb6ySNCc69#zS#+2@F9g4lzVH z)@Z>5!Eilnd}a(2J8>=+^TEjInIi>@9yTs3{!v2DX-t;6hBK0Z;1TS(Gb?@c7T(hl z1@jM7hLU{=%@Gs?NNZ~{#KV%GkuA6V`v88kMVIu`NMuLc&t{02ou0KsQEo&4(iBX= zFo^?ZzHd>OxfT62c=-?HxI2wm<>fW0^O=lI@B49+Fl*0q6PN=xnQs7LgBXf^Q>Cc- z^Cf&4M&7H`8x4wO3DJ3tz4CXuR!-Si(JIR|u$+pCMSYRL7p-zAFcvXbqviChy<<0! 
z7X85HUJBRf{2Fj-zUavEd3*TT6uCMtK&J?(G|#G>l^_*bNMSaX$*P!Un6W4QUKga= zI}k!fd~2occ>Dg-11K$r+uOG-E=ht%9j<&e-@e4r6xszZxf=WY%Gq~%+TKy|A}*hw zJq30Zdj%7)?Rs>`^3&z>9mqJeh)LROyIbU|)k0=LJ(om}11W$Q^Z$#(3yhtCz}V^H z_S;GxasR)uQ!Q1*1z63I&N`&*{>L8hfjR>#IS~MJf*6|;P;RM$n_Dj?6RZOl!ikiV z`h&|9U#Rr2`j=EqO|qKRdLNqo$s#gUyv%~WygS?4&x2(fZI=nF#vvq>cqxkK?=yYP zn_8YEA2xDXxv6g##r(Wx+V1rD!JS>?!dUdpYS~x*dC$zPu!kL5=zNUA|FL=v_3<%X z#MruR{ReM5zvXM&#^aq%)$$ylUb`T+3k69by(TyJ(!93y=woZ zip3z;DwUM?FD)v)${O;*;I1Ry2W>O%YZXQ%;Q?2E^fA(@Q-2>1SS<%xgn%oGmqZUE zet@hj8U9H8Mjujf~ScRoWe&ZK~0R@K~4xkNxrY>CQ#t-GKtrsT;=!zalw4qH+B zh42X&N@VVSf$_PUDAi6#T^rTYJ>e@_d7-8oSu(i;#o%`@)%pl=!N76Z3q->Jb~4Sm zGSh1?YY~cob_6$k1Sj(hI)#g?Hw=oX)P0-<_wnK3oHd-R`KGGH@345r%zIVLwt^ z!JwEKQ_%^oHyKv8SV^Y{#9wK>P$1AAVis3$1)uAN<3Mzcjvt|P1%ump(qG#tQ$$0g z0|0ulBr}YDO(v6Z>zA;iU5`D_zK`jx#!X!GKhWvkAM=Z>C7P{*z_pyWE`5(m@dh8Ca-d!cHr)zKh zeX0w1zkk5TW_1MKjU&9G#sm7+#)^D=*ZW(G?c5+o&>YoDAfyhCd;e0-W)(_H4sIH^L$i5lfE6K}4~5=ky7P$VZ@O5e>j zT1~}SmP#6#waYe5`U$^`N_&KuSuw7Ak!nlQ^!DMftG!tgFR9i!TylX@`Avo`NBN_2 ztyY|-Twxrn77W7~1kCR?0PkQl73owc*(9nfKm6lvxGeLw^99OajV!1jheK=N^i$8_ zo5B&*wpKFw$c>4gwdim}oA;bJU*tv`rTf!YxT^dlV@Pnt}t;zt7?<$%{LMvfaFp;Xc+#bcV9j?eK z`m6z~1kC&^BNBvcbK&6v5$?ysdVJ8~q*;%!vao&SGr%f3N<@$s^;~SmE6;?76)p&D zYLrv$MQ-)`@OUUcll(!vz2I#TkovuOk71kKp%CF_S(EkZ^hY071{^b$L7%*q^8C6t zISxJgs9-&B;r&nQzheXY+ILdUQo5(s{S`M7%Qfm;->b)SvWHL`rVkvL#jU%#xU?Z* zb($2j9)v_|VV?GTQ;!#>{6R{b%p14<Vk6mMARsB<~`TI?kGAZRp3Qt za7rDiea~oJD~9e``s~bN_05+8t0!ubO7`Ct$XG4kot`SRms!r{Ppd{bs0mwozI>;O zwXCf;{fuWRyKVf%sQHy%d2M%5&<`5OAfe-SwI#Z))QT=x(<>^Y&dc0DeSy}nP2a17 z@~wTKsJFn3F;i3Nb`z6NrHA&VGTGOC2Kgq;@t>1v_oej`%fqbh$VDv%Tf~J-Leld4 z%wKy=SRkx%Dm#x(yQg3p9A0x8(b}NF-@8UCwU<4I!maQA=$W{9uy=q`EFlJFJ89Qd zq=E&oPeZIK1C9*94CAAs+gjPT=T0F!W??o+oHa8c^n!g@lD*`&O-gx^Ysnk2GpHhd zA|VLS-)@jftOzJMic3iX+Kxi>5&TEemnp{LqPbxOMUXIiwPxUA6Ksd;@@J7U>xRdG z!n2+$v&rKULKME^Kub~a&_GsPf&mZ*l>;O2CZ-0;rK1?g=#R{}jEz@1y7dH@CZ{y~Oy@lyee%|E=^4@)#02X6 zdA4Ob@u0)#Rl+2Gs0OVlO-h+sev^QUGZCdkNqBJ9RC5ZY378b;XW_$Pk=E63WNH|= zWqicj$=B7(+06#Zqbnpf4*Q#DS~Yf2%Wq1CQ^Sca>%2IcYWCtu@EFgTQjWRKx84)t@|P>5!56X$c-Y8~@q4rQ(Tlez*(;D7Fw)xH4_{1g{0lv#~j1RH||-GZ$9=|Uh} zFeN;QA|6=Gj0}BCJl__S8r#JSWaJY&sf+byzgy-jOs>7FUVh2sQeG6q$vIS6+*oED z+2};l;wh7m``9wu$HT4fmu;WQniAR%hR$c?TZ1hL6#3sI4@L(~Mu{aq>g=Ouv({dR z0sk%#9F+K+oa-l-P*zFzyfm=5sk5c|gE)6FOdXo!YB((@z#Ykaqn_(nht{w|lCf-~ zXDwwqDhU{Elnvh$h)bmDY~d+6y1D)XIS3b|4k%BTmf zH53S8(Ja>ty=yCQx?d1&Q5d#emda3@JF0KnW?~6CHJQ=+n_cbaMmd~K9y>zz zE@p;zW~0bQ$_BCHPJkGZK)QT!YRtPtTWB`+Cg)8V zEDEb0r~DDe(xLZDTQZ6RFgTi&QXLB1g(6O28n35GkX8{`nFJ!(;f4=%C810r`r8QW zN~F4j&(56c_Bl3K8dWTknn3*d_(!jK1@V~- zC4=gxkOgVDm)4_#^8JO!=;n?NTgUIZza_ZeK7G0t+kp=vmbk9jdOa|=GWYt}i@4c6 zJ=z6N)#E)}MLkzQN9eM$>QQrxNRQyU`(sJI1D|U8zrzmwFDCh2)Dp(sf?oauRT^+^ zHQeO;A5f>h<)2^iw(bQ#{O`NmRv-L#Z)7~QFs0}yc*Z05Lh3~NsN42}CeV0{9@s1j zMPQx4^pi6QK~!L5aJs4pHg+J=0T*=3OoT_oZ0IJKe^aQVQ2(Dd%kt)M9Y+CrfE0a`7Iht1*xy!z%G4a&~fU4h$|h$tuY8>PU6a zDCisfqbm23>b0ea&xQ5Q=n9)N?=!J^%L5uV1W25>v{!7}YX3}dU58D=@(JH!iB3c3 z>=6!bg6$6;yLZJ;R#sAqv=_fO<@^BC{&^~3?AqWs;F}(iJEkYeQk$yVnp!ctds@%| zZ|9edgb?f}uw}D@7}c0W$N)?{u;&FAg92IKQQ)%XKpOynGV^~>H`j>!G+?C*LSS?4 zheHryv#88qT|5XEE-{$A0ZX@xl37gYKf)B$%aG}{ffBwuHXaxhK0@{>D|`DgIE$){ zMuDOygXCS*Uk%Z+rl|*#nb4W6=UfRt=2)wIhj{5x)u7MJHT}}Wut1(txvo|~`lOYWk`qRHLk*|g@&b+wqOv~{}Gcx4ydLbmo>zDg;xBQ&SR z6WyxQrqM?SNg}-#r``i`&d&w>SK5|lYxM)XM`t*i+V-g*55MJYN-^3aRcUlE?;N5M zek0@7=G(sH6!J5dFQ7{IaU1t0d15g<>hb0ej`^p`yn9`0nE=s(OulsxxKKX(e@ofc zF{+C0O8Kte9oKa;CpQUGlYP+=`K&p8pnPVFETDn}B0|i?^q=wK21&4j1;%DZ$LvRW ze5a0uU#?qANcM@54iW)i#{M76YGnLZV8DqQ#`HjDf$js=@9GJ_rkls 
z8>k5KQbTwQa@0^1u1?L7kcO(=Ezi61HqS~M2Pmv;JO;T+Ro=KCxjNjcw^1$)u>8n5 zYP`|%d&)S&_?`Xj;dK!*a8ZlHu6THzW+s_g$Yc9g=t1 ztO@?LP$iVuYmjZ_cU(sk-cL2OCW|dt_>ORS6&euh6Rk27QzGcHDn z&mZH^<7N5(12wg`9^w%tv(shbrYALj4z2nz_2ZGu^6fj5dB>adV4?c={V(qptXzwB zsC!CoVt%-Woi5_7hcrSdiM>g2z%-e93r7*o0Q}4jcsv1JQ4f4n4G2v!*oLAYOvDj7 z8k3950^P#m33u6!;37jrTI1rgfPhbZ_?fY_eryCKLG4Iv1R#1S0AX{8FBvUx_e@h> zQAD-89yWf?-KoQoP;V!~qZGA|!T$$Bfo_nACG&m?<`L%#rmK2JfcGYJmwW_K^9dO~ z%;ZfAdLW1@{c_I4t{$W5`Q3>{+<_?3`gltL=V3HOG%*JM2_z7}GsH%etjJ&0l1~+@ zZ{^K9|KJ1FwzbcjNLZRGFQGB}BF7m(mQ#!SH^?;ENTL6W(RV8Hho83E4Ok!c(mRLS zCywDE-IMs$VqXH>)qRc3db?M1s7XRsqCvO;TbZFB%h`HzR(z&o^XMuMIhnV0Z2;_x z#Om!2l56Gh7yReLBdnnfwm4op@_i@oER)TEHEB~d3Z`7MqtwM zms{8V7?~_qTE|Aet5|oK<*cT?{a6rxjo2)b&OB1G?%|GP=;URvoCST>0!hXoc?b#y zuZSLG8}@x)Z)@OTYnDFxnsk2qMLD+(4eO&Zw*GUim_moLXEE6Llw>;W-)!RnbkSS7 z!v@{gLWwJzKxspsOZhB%EhaBxuY0EHQTv#HE2o6T=~#`l7N-gvjP-%wBCBt!rQorw zY_;omUhWprImn_bMN9wKBblJZi%NLYZO+8QJ?V9cxbHzy=G%j0D2YUI4da=O0C-r1 zWp-;vShwtSK+MLzzt6VZ)lA3u-uLYGy@KHYCX+OF5@u|cXx&%;HDoE~kr$4jFQDn9 zP%mY6Xdd|u7dZ+8;fCsDgs%83>C>QLKiN5AG7Cq3*1#dr7Y7C_`163|o<=A;La zQ09vtwmw)>we+{G1ph#4dZXf%i*vFbQ*gR3?~|&8d8;d5Dc?UzX5F!|gW>m=I+y1z zE?U3ksQTYqcyV#Qu2ET;R>eRfa9ph;KP zg3sisRU9jzTuLK1g2Dhr@gL|oD@6zRIXu%c`HfNi&Gf&|?R`B&sH7NE3}seF<04-{ z9z-Bbq0DK2Bk_Sdj4p^FME4mHkt;kwDNbhi*PD*MB&SOi$AIX|8FIOnrk$TK4})lk{`%nKEljXf#F?+ici z)ykr?iTL*2G`+m=s-7es&f@dFVOvySbk%G$oTDrI#V~jIyRv2@8uyrw2M6}b8Y%TzYH<%Mo>B@K=`;~BfJcO-Y}>pvl=!?84_+t^M@HI z7|{jVXh?N&NtM{~Nl?+i-yFOdL5UAxMS%Noc*0#1B#}0dkTgPMBo@>)s=jiw;cm^7 z!x89o#Kls!nu$sGVSlh4nt&wWB2Z){A$+ zlSqq#k>F^BdHQxMkE0vSW^ zXb-W&K)?PPTmq?9K-g^^6OB8>g#AxODk0a66mjBX*NxyoQUduwd~AXSEZkxoY+XED z+q+3Y4qT^M7cpvSoC;XAV7sNNfUAZ7$x%%hMWyb?eonhf|F&{ry=$wIA_0p|j94v> z>4$%yE0Layn2Dk}ul}XIWU+h^G4YkqkaP1#OBvsiLMRG8xHes$c!&^g*G?t>ait#G zPH1m2q-8?~*6P+Oh7FyBRX zekLjSP~lO&J)yive{9k&kk@HR*f~q@X|m|jSW-4^(;q@gy9<} zW{loR?AIGQcG3^!>DgM*EzH*`2xDt zP~J}^-OH$thgT?x`!Cl`df1|mUw0GqdfQ1+S-!Z_`U*_*p4;p1$7E&24g4FHUARA{ zIJgTD{rPU|(NQ<=ajJRpUdM3W9Iz9BD}YB<35CcH!y2haJ%D z(CP%4OqKE6?!rxL3Tt|oK*ej+3vRqciW%j|#=Z<8mXZJCD9 ziSnVR6imuE2N&l`3b*`9PvYee{5W7%T>(?5(q9CKkcijq*QmuErz_mF{gcp9Dvrz# zoq#C=0%}l;ZotW{=N34%n;AQD`1FMa zbxSF&%)86+l#;DB93X?nI9VXxcSb z*TClXHT_jPWU}XfuI1=#_dJ{T@ymYtw}+VS+xQ0NVlW<7VsG~UUxBTi3v=KV7=;ki z__{MGU*Fu&yVeYFirC|^aaro|eO23T67^a9Uht6;#xZUm-?&3mLc8GNu_kks2@@AH ztmT&O|ExVELLE}%z}bI5G%^eCY2=w>$OIte^xU0BZ@zl|W_O3J5|AWkf$9 zXaF(0qahSy|J@2FAA%_|frtGm1{Wgd5S5GMzyTviuz0BD5KQa@q6F4w9qHE{No%_C zY0*&oooFKsB@sS7G4(RF&nTMHkzxr?1)K>kW)*fDT$R6SRVphi3It#ng>(qS=#4cq z;;KHAI!Q6XhJq*tn2A>-#_;-ayouP=!NYwxL_}_N+@_0CjqT*%{UZgp)RdifD7$Ru zrMJaZ7AE_T%tjfNov2-cQr~`9JAG;YHbt9X<{|%9>)d_*oOzIq%J%#F0+l$Ned>X3 z-tRK4MTr!+cd+ke$sIo*4zM_ige8)N-l99KMh+ny`VHom^*z0`*qekn85{|Pm-*#W zv2GH?c{0baZ)I22YZ<>XzxCJoeIkxeqcoNT@M{H~K-Am7MHS1Z8g5xKho6m_5v^s) zOzC-nU5e z*R4Pu9>_U-Gb-^i<{yYfcVY`jSN&hZT<-oqP>8|s;88dHf+4gR*M(g-5Sio!dw~Ox zaR9WU4GusIw?a^hkO`sQ=7ZSt@qAi5JM!SQ@Z4B@8o6%x-ub<}Ti)UIcxO#lYb#Uh zKhUhBI4>Z6&mB`M>CUV;8D(kX@XmP1g8X?$CuF2-4$lWN`z`uNdQ+6Bu5TGH_h;LY zWJ+H>^=NBBEs|bDC3<0SIi5G^7*76SX9-U(w_5iAR8vX*gCBE4<8XGT~{53JaZwp1^5`w~&h|tm4tg6GLGBY`ic3 zEm>Qe-&NMj+X;D_;h@r);de%@j2NULaG6RI!BT9ibAX0o0nmVZYMJ~0x!^G9`(~XJ z;07=;YqLv^N{Iuy#d*-tAb@!82W8G?C*i6X!%0GdBZON;G|X;uzZCPiwO_q7sMx!G z^SgCN>Tk!X)pF<$pk_68D~LUKp!#9QGNLs)_)!eCVOwQkSHjM4%KFIFguPkYoU|ld zZyP@2=4bf$PhQORk+ZaC{qMdbX){4O&`mABkbeqrNC2+&>0P$!*TE3DlK}&?pc{cU zs)y%Afsz>2^BkJRoWzo!B7X+=@j21gI4uYq6)AjW$hVAnw*hynScYMZGy8na)G)1{ zrkfv)@JgsYeC9e53i@`763UtBlabuUH$?4gdVj(zz0-Xo6m^W_pRzUhdUA;J1fLL} zkl`-P3>Swqei_Bp)h@Evbz<#|>Izy+g)`2FySxC`3S8vvj`k#^Yk#9R 
z#tO>|t?96D^=0CS)mQ+_1Ef5Q08yAI##eoyUK^z5*R>j=cr9y8^5rPFNA-Lhb85R3 z@es7B=;iFQw^OaXVBgiE7$3X9_Rnr>HXK*8`m! zsNQeAMb@r4DWbSg>je=8<5Lv~wc^)wp43-ex+F zC~g%bN0R5M!!s~t7>a16Es1#^zOcPGYhZshS1NMOFmisLFMAzEF`6fhHDAIW@56w6)4{Jbl87ktNb$d-i zla|%K{rWz!nZ2Z-$twqQ?|d+GIu6Ow!CFzxZ%ZmTkY@A8f{m< zUCF1;x&Ei4KCQ5Hz?DR$2q;3JaD{=O=g(~P|3j9`0e5I1_XMxHKfC$w6{?%{xB>C( zrs?8SQ2-6ttVMGQ4;!$F2p;=xM?#VQP^dzxpCO1l>UYp;;=;6tpIzs^oc-)g%2#Ro zjseniS>KnY*NrZsm3R?`l7EAuAJ5Y&mDb{sKX$;wFH$&N?M zMl82K=r))fb#SEROtcON*INBR6{#1_quD>aCCR&nbaA{|`xL;THwh zefyzPQo38Z8-YPu8iocz7`hwj?hqJa=%Kqi6p$9_?rxAydG5Tw`!}4=K4+i3*ZMBL zm=6G76y)I538d{Y7%mwiu^sGW6886i6wXc9A06g{)^iq_(&!v zVNK@&y);!8fls9%v9uF#Es8IH|L%ok8I2IxzcyPHH+@gKw08)}!SHzo93_*6fxrSq z)Zo$J=>H$JLRfnExj^y*t`3^gIzk5=6~zE&(g*WIGa(#>$gGWhkWM|pd0*UxK#WV~pmnW9uv zkQ~c$>zO+>_ZvgYSrfP8iA#HifFpZug5#QYSDl7nH(N3Ijjx7|seY=-a1~r~>Xk&z zgFuR-VGtCg*wUZ6vgNBBM;Z8|TKgh;`wH9oqPX%V!P;bd$KGxr{!d_Bt4NcvRbi!W zrOBX{|Grmdlba2fAR39w3}gPr+vn=RKL__o_m+ZDHl8j!iUHYJlbIDUrdfh2KOt>ar7vo7sV#?!|jM$GEJI_9GGk=ucO*|FQ{d24QK zUI;&Vb{)4}9RnmW)B5PKBGF&^8K_T|w=$fBkm*{5z0j|~h@K@3-;pLjIt~@VTi7@e zvMcm+0t2#%1?4j)!=G46PYP!n0u-)9P{V#7demEK&axu>y16o#8gZ;ba*;NtxY4ng zw^$bWdo4AgscQIpkG~Pey%~C!4zbWZxhO*s+7bX?dNVh>GudNv>1k%C=i#nUSv?rd z<@#V{GG98PLeDMxX3Tfaewl;ne1#)5ANY$^DD8GCDTwuc!h)-&*NYsMe$Fg zat@T7J6J;xQwHq4l#m-26?t^shQ2sjKr-C>pS@eME^&AdJFgF?cLU&A@VJ$377u|( zBW2!7e?QJn)zPqg&XXEH`TOs4oRoL#e*pHr+}N9s*JszU{BQ6;H&Z#pG1L%iC>jbP z{Ck9~p@~e3iI!)B{s9^7e~IkD@ZA`kUoMM+m>&v{V}uXBXcd0LNiv8b{Rn6lda&^u zyA}08jXX0pc2PQ_op^Wub3CBmCyp6V^6R$pjz zXE!3TVCF&nz~*3i-M_c2xP6m4FJ6=>&qN9=*!$-+-u?Cv>RCZeK4(gsBx35M#!-LY z#M!wbbd82#>z#72`XvB#I}o8OhH%tiHV_san!gx;%O~izEH-2sHe13n>z|&eCv|&-kZv5~3G;e3{LPUMI;}@)+(^_ z8TosnV)%0c@|ExuGASrdC5GNRjdIe~^ITY_-gAH5lsLsmK6yg9Hpw&W!k6>v3Yu1)sJC>|3L8Zsmly9b7YM)b89zybrxK*opPINLz zjn6@HGfgc@1(&1x9FJ@2rxPZX|AIi6`=>`z<+zWxX34eGyNEWX-y zxm|)6H>%G|Qq{nMtJ}9xK0T7+fGK)BR1G-t2?`G<*hY+ggA>^L0HHuc3b@*o7ac>I z0L}wT!*mOlMK75^NSH>&qrwAJ)qkKqh8K6zJOGNzT95=~Z>+{GQ`8T%t3s0k%Xlv% zqn=dJ6%hpf&sq3pf`@|&CQ}N*XK40^fSlGou)9Wp5GACOVnhGBO3C8+N_Thw4Nh-$ zq5(crmI;Mc6-jrBX_Y9Ypb&^$VbkjzZtwxVahW~W%h*{9+JLO9q?GUG2zSMltIvh6 zSbQ(#KLi}60Q4I!hWi?q#~KFW4tEpI1Gv$l5o}c13XU(-+Uwi72>+WT%$2)AsJhvd zv9^2*EoD1**cua8=#GPK%o1z$9(yeennX(MY(RBHCy(^L*gu`Vq8nZ;)a@x!~CNj z3cxZguUxxlNVr}#8j7TuC;Q@HlI6MuH+MnMa_z6gKPM}x!MmNEj;1;>1o;RsTcOd~DL(D7)zOg9|}`^Q6B;e{${) zLheW>D|UxBG!44ikT&wAJJm9z1||bb1|&6wf1|x~Js$NOE*w8--@gkKJu$3V^i1j$ z@qb{Sj*^Ri*x%WIu4C4Q?=e{thKH0O$HPfvCz652%ljA8{R~*8In#7NW@_a=8#4wh zLupbd#+)^+Oa`7d@{W)aawRS<$fh$2qZn;MpOiofq#+ANHV*v(f(Uhq=zX0!t75k}AkNi)e^B}zlrDV9{4<$VS5%;>;SIO(>7Sm$50RH|A|0MPJ(Hh%%Uo6KN{@Hp zQ8n>5_@n-Qf~IiuJM+ilu$?2AAkoWEcM6vD>%?B{@T;Sh$R6;pxggt*jPJ5g6Jas^Ek)mXGBaL+p8evnsc= zeK{K6jm~m<^Lpf@SS|I@8x{Nq^pDFpHvFi7c>NjnSzf%(pfj&d4_08#{CZF{U(D@TNk{I%=Lcj|s8$WX#Jc;ULnu zAeV)f=1+x}tFzF<(RAo{Or1E2eAw^MDdHVC({$(vhdQRfCFw>LW#B`>ESo;#YHah> zrEsCCsc8oGkEpJ&6Uv-G;f~iv)^%+Uq-{w*?)>o?a8e!beP>X`)#1+DN~fg-ct3zl zaaKi^OQ^Ih=inFY~o+O0(oIM>6j0k zNYr2y=a_H+=}B}=KmJ$R$6y^x0gKA9OnC9Cm~rxjA>=n=t?)i*f=rC3TMJI^R0KQn zr#QDG#!mRtwN%?Fp8wS7mRS2uG7axG{zIdt${cxG-tTH#@_5@0-b(g zD44dk6%QP{g7!?xaTNnShbt-k1)r#d2!W>aDJoVvL(W@sY$=ice9yv*MI z)4j)rZIhiN&msE58nSZc<+qjQ`;`e^w*k392rHg#w;SzNZJUJKUud1#S(!cSsn`0v zja8vi6DS)kp?&o^;;OS2dbK{Ou1@XeXs_dk!_%!c0s7bHl96u!9JgyJ2X5i1Z99`r zxa1c7c(()QhaBw=PO|N7j6tZ^NIo8rZ}^(V#O2n^=jDG^FWFc?SX5Z2hzx?>n-B72 z_b#a?RX2SVjk)PD3*t~>N<1gI= zD297Rvv)}plOvqJKimY)yqKSR^$e=S66;?+S$vcm&z9&}FN5%>#q)=uh2Mf^a%RsS7gr`kl z3u4kb!gI9npcPafqkrxi<|dnfi<3CNu^u#T(72>}%(#xEwsz@p9h0P`JwmEMpzq1pX zE_l?CudUm8>zp0EOZbGGmtb=x@{CgL+F_#3>Xy1X)Mb7i`urcDr)kRYl%2&#b@$5U 
zYBJnWB|vZf_$~h?doo}6mv7U_V_ZxI!RH(|C5ch-x-!PRJTF1RLo=NYcKk_K@qPCe z&38;d3s#~El!>y43VeJ0mO^`5!p2V72(ElUZ8)3DOsVlMz_P;sow!Y0Wyu{m_PIpp z&Hcj{&RSwmeddU-XctnyB>if__FhK0JRZHy__!_^g+8NeZ89v3!6}fs+xC&~f1rcJ#qvaGx|fjtxAc z0|gMRhz!pSmNNkq5K5j z^6V2n2#aJ(5%hD@SH&w-D7xH6JgVYPAZfQhxnjE)wyr8veBOHF%v|j_HBVNm~Df z&3Zf>L&~+!iK{?o?Xk&*!>yD9paGkuhTb?bH>LBeggUnK#pb*juc4g?6WtP_nHFnL zdt0YIo}?|e^}dfw~FDJhd>i)0Bs^C z$NN1+_bsavL5aBmatrB+Kbsq<#u*A-uDW*iXIv9Mg+*tGeI8BOO;OKcFaD*X-Mb$QwQ06GVwS(s-e{-i!Oo1hK_m2mlUz|!K zD~eCh{*B#7^ob(o%C`?cTIt1Zr#(=4vmJ#7n%dLSN3c27nHJehmtq7Mh6~mpxhX4( zj^{h%rp%s_>$X0K#h6Fwlu{_=Bi^v=_2dMgF>C)l^euK@fn!Qd+>UFDV9T>J$2s1Y z^gZs!w7fs)dyIorQ{GP7Bswk!{&-SpYs|5~(Z3zktOvbuT4!`^i~EyI4a8BhMDMz_ zgg#$IRB0wYCo+AVHpu%-CzMlF=x%EPwxXXJU^gV@%OR@3=b5je(4PYEMQ1BSe!JGO z7bG8Q(FA7MjQ{gR8BXPVv}2Nw!U{a*C64qknjy8~69y*fsTJ)0S}xIk$h0_W@Yy*B ztezPCUE^_oFm0+XyLMtyyI4OA&$}oSl0q>^GvVGAudxM?jTN3r$>P)dr{U8fav+0& znhtIT4PK3q33>Akx;s-z(+tWVvCk&La~&eDU)JNeat%F^!}8a~>vG3u52M=FkDoUr zzvxYFTQl{Ypgp6*&7^n^A(Zj7X8)%urKFXE4+W>>;3Ek}>;if`bmS#Ejrx%P09pka z!IMWJ=SRV5R1obXxr{?1;fqSbNIX4Jx%f)g{*QdH;y#v1Xt2jvIl?l&usU`@aGoOm z3ce;8&@vi`h7-*=g)S@iQ-&=!8u(oujFRvc`aXvU43K22$~{M|+U#DL@jlD?w@tY| zJH=f$?eY9?e)n*HGkThkS&SQY^P$!XaY&}|Gdeq4547aGi!IJ579I}aqW6B|0Ie;M z1W52vYx<84(;b!egwOf96P3`F1dbB(eApeL9yMUX~nggnrB(PK?MU zmhk%#PN+zG*_&+&PvXVZHmMU;%Xq*s*KoLCg9cCsow0NrL9OpKOT z7_fUv6rC{o0~?E!87z}ZL#D<~CuG;h!PB5ak=LZ$;O#%a2#2YPp0u5pAKKxxlomP7 z3b7lm579s4Am)^}nO54!_V#N4QIIyF^2%c0Bkk|!sW5TZVGjtqX6rdN;nVXfEYmKz>Iy(Q0 zMfnLN)ljXB)8A?N&{G!OKf~avwUNO0JnYyB$+o-!)fCmvjP7i?@T>kz@0K5q<`z7F zRItzXK>SqaJ>LbcO3&F*tlB4^c+GD}*^MRGx}dyt6r+zly}s((caa22Tvk@C9{i{> zI`vu+6z*t!Uvq3M7KV)m^7W3n`hiYOB&M|6wJ+*CFd^_Spw_p@MMUIxlrQAgr{j%1 zVX$KxS&@&VCPFQlUI!LcKdRrLd7*40c_2JyqrYM|5{JUWKW{GsjEzCvyOmW$Y$m0* zI0&s;%{z$8!S3D;`(16FUdVv@CuV%T1Vq6pB((ughVetKC&Ti*to=W6EWkMA^FE-B-@HwcS_qyS#vAqA2nPKC_6% zqnV=*m)SoFX8ETrE!|&m3r}i4K>&m`l=IGmp9URsr;!pd@pT{|h>8W6t#o1zyN#_8 zB7^}H=0g?2gr)MqVCPMV(dADC&Gc3NWAqNx$ar2FI_G`Io!F5MGNF!}qnK>?G~iOh z1ZORw)>mv^)8@#b?H^g=i!&|{`-gLG2a~O$dYyBA{#l%KSo_1p{KDa=kV88bVE7*8wXRI9u)1}`j?89Ys`98KP+}pVin<^SD_09a z6$7lg($TZ6Gm+SX%4LyMllgAYbmcjXRWvCn5&&f) zEfh8S=7(c$bMM)M^?7ykNo2%Zo#t)CK){Iy>GTG1o|-qFjYRt2Vb3qZ^9a4H8ChJ6 zqx{`@Rr3}2>R35IZRHU#&-%h1{Os9|aU>uB8UaGg@ZEeSA-Z39a=(V&S89`q7Yqdq z8rEGLoi|jxrgItHb_(fhQL`joy^c5Lml0aNSYiAPZ|_4;%9hq{RxxUNP#EF#8nDlj zD>BC%VP@@Lh^Q_ReBXQV(tqWHP2QOA9>qP64wW3<=KdXzUj+g-SA{vyx#fj|`J z^WovBTWtz^!BEGJuBD#y*`I@r@wOEdyJEhiot6HZ@orl)ksG%~_~!3(#J@iotAhhn z=LGH4yfQP(;qt@b?f(`fSp^JJn<|WY zY#%JfW_{ggSISd>-q%N)9q&Q0XAc`~maxZcR?KsWW|;cIv7!p3f3ig+2eU*4Y3Yzd z=Yt46X(xoX^Cw)|I-8d?tg#fBR!Jv}0SC>L>Wl!&ZGg9QEFv#DdcN$`P z(`qO&Uq1+vNTn6?{(^7dLN_Vd?eM@4v|IMf8HJ7~jTa(=d=Pz7LL{0MkxgWPW1FmU zr$0{hVozD!6(1|y@UCL=hC6>^`gP~!Bg?$Q)#%}qmEeR+vL#jQ(;~&k-V4#+rTc3< zu;gW=G-MfoyEJ@$kBJCR2j#MXo6r&9E4NIz?06ERehNJV5S()sYlA22#r=F|5a?>W z^$L;kmW-D3&7p6!&2o@|=<>+xyc<%NGvIj?S#t`NFHC5Ru63U#m)%D;i9QM)`c5h* zH%6~IZmx;;Z%X$pxx6)VDDX2oi2xo;z92p(3?WDUaMXz*y0`1qWM6qRb-fgqIpP^7 zBd!T;Vt>4|!H&{7Yg;--xF4U^aVi z!+onq$E-&`H-*EBjQlW{nu-9)I0JLKX(TycsTVoh$U3Xr9@QGA1wAW>Ld@UBE(WQ) zdd{m&hQ0*RQpHizQ8Jz>eJtMy4+^8C4jq6Mo?>taG66DNsGd_0o~QhQ36)82uKGqb zYTzkHwASnL7ny*qP_~aeiIM>4=BN%x6E3s^cH9_{@E^3GbfBG}Mak|A-a9?(cI;L`8%4 z?S_1Rn=?S$+iP4kdJ9v0JxsB0T00e0N(LvCgsNk8A@8IYa&prTsVdqGmfjJ!EZRAe zayfpOYs2ZZxU3}CVa}qIP`hT9zBb_|8B|K|ZuA4!2Axw4ia+lOy#-e|o7^5ooR>b9 zZtY$oWg2WBzkpL7?IqO?X>@0ra)2(h?hLFh)#o64q1o4EgYkD9HHk@851P&{K3Pq6 za0l@ZT%m4Vw}e|nreZvI_NBh&$rB=JJQW?w4{*V{65}hL{hfWvLM*2w%jHq~ z^?uzu#mYY!SU}NhgMOlV3u&avOEN=x5W?}T^dUNY6|A_9m>48W@Jj~$`#2O`r+BeB 
z3Ih$9I#!=2TIdqn{{i{YWTLF_+PHeA&K5b@TbFf+0XCJygcOcohdHi8)REcv7#+4$z`fHk(fpfMC7e)>aYa<1_A%UMspT@A-tEBfD zS+p2DZEBVcU>WEJx+v!=`GUhKEqKkAUD4I%#Ep4dGN<-p7E+OVVwHJ}Th;TRU)$R` z?pSP$1^#jf9Xg6AUqJ~3W}FPiNneHDHBKFAMF*&_z=m(!q>#0FWTJ~haZ{k6 zcv8dY1{f&Wps`#_#T+>a3d6zkPt&oZieiHDEN`{r?78Slg6j8hPAlnk@Diq&kaaZt zdu@1~ohw!v5pc@A?1ubPiM|(B>6X|<++nA58yG>Z*CJQ6+tp_6DV%`GLWY^l%kR<7 z_2obMFRBC-xtI1ruqq)Z0(nAW*<#X(rd8c}UK84y3E-w+G7cP?JCPD-nBM1vJL4U% zpOC}smXgHAUA`Ul%K6>p7SM-=Op59?k1nw7O3@MZNj20sN&VVrXMZs^gZK`vS)`n0 zNX=++vfAv#kSRzBiT;3}o2=&wN~<8r5$qWIqo-*h+y8twpa&cbzCyZzF zr4@U9a98`Ae^mEH++<>Cz(;1rfL>^?jPK2+8I^8U3jK*~Emp#2GQ(tQQIVlb={tuz$rak>$0sKzh-ed2hV=K#n%*x?SVe!-yei-xN0Suhvi1 z+p0RN^eE$fZr!5yxf&aVoizB5)0)$A!E?=aTOt_^Ww-rdNNszJsVe}TEJ8~2ho1y zO98@CS+EWfsW&31UMVG6e;PB5pptQV(F6NNF1jm2mZ+!Qpa`ce{WMqCk>yGYt$b|l zHWH{q3nwz8pjC~<{y+*2O9r>Yi z(PL$wv!1w87?^Vje9b++A>DwYMab{XMj; zeRA5GnIipZ79v37w9MzU9LfP4{Yi|pdC#5waqO;!`xT>gz7|nD%8y&U3c~OD{{!I8 z-R7D~wEqXVH*WuE4hR_oY$LwV3QN;!goLB@AxeW0(y%^|qXzT9TQveymK3Cmum+!B z?)rg_`hA9~g!giKqUe0bW*;i}MAF_p-hkc}(=+cfoP`zSs9$7~WJcIh3`kz7qa&gAPE+}y$S7p|q6sq9XE|Tl{!+ttLg~u|NoRjB zK2;4I~w!bI6^ zgqvL~4~9KjO6TKhh!L~)6EZu8?N_3I`2KG=uSTXk!FHRiSxn?MY-z5&Q+k2sI8v)+ zTiH+yhs+iF^P)01Ygs7zig}J8nB^ooLOG;{F}F=?-lCp-a+mfmu18=M z;m9Z=D)H2}%2GOjn}$*azPn1&UNeoL#(`7{j17ytGROM2n=EkkUz!dLUXwhh{`%L- zRWu)C(dK^(^=uDLuDJ`}3CvZa+i&&{!VJpoa`@$AGTgo84)^vS6BOqoI}W4wAct@W zqeOZs^RmtnSL*v!uG}{Mh?TE@7Ewb&iJN(yplG;iv>ki0cVj_+tI)0mz4fxs;t0=~ zSemr9T2zQF!c1d~-3BYv z{=C&xZDo~uV?;3xr91v;tyAma)ygiWHtxT-NNaNP;$qoqeXsOq-~+E9d+QX;>~TFZ zVlq{7sZjuqL_YRX8f71kb4=($J?-UavXy2pmP;^{O}ZsQ7!AjTqI|$u@=vDzA%Oa{ zjjm}z{{XxrVJ$=j|=FC7Fe&|&$24kVCN##~!Q zE7a7V&%(;n1i2aDTWl)5s`ME*^@9aXd^+6vFKHZWdIs#ZcSv0ZqM-i+-(tN=Y0r3S zXHdP?ExUdj|6d3t9y~!8;SOFlaMKb30vViggANx#Bq}1K-D-Gw3cYar5GwuVKabH5 z=OzaOXh%yf;3$jY-v!PPjo^>vk_jw!0u^l47p7H}=`YTu+fo`tIa~S`u0H6QF~H6;c+m2J@**dhHD3^bt5wOM z)kM0?r~u-eX;@iY6H|}O=N3#7er4wotN~!7Ok?`e`XF7DW1s`1-xQfdfV4Zc(_EaX zU!_NNQ9+nqI*RWxeyJ}jvzNOxffBTx1!LI@jk;=b(9BhwDq9H$A@dMV>LJ9#WWZWV zlM|%$$Gocc`c(3ITxT6Oi_oilu{mFsm+zf%v91Y)?_TF_tl>n7bA7}rC80y)^!AO> z!gmX45P=Ia07UA|c_l#A!R0IGc+n-d{H-T{)}!m3QSwFIZ}YKK*AK->*cCxDIMAeC z<}h9Rpo*}0y}ajKvR3f(iAR>}I$w-{ZELc-?RLq86C<{A%4CRK1=YgLx7wv5rs8Dc z9Wqpjg=RKCo`$atb{f2B(7|733xzbW?Q~A%h?WUIk&Z1TZD@Sie!xnj(a;et`a@eI-w0nb_*&o2p&NM_ytlOUX`K(P2jUw0Z>MiflQ;Q8`)<1?^xr zN=b_$OYrEe@r^zN4e(Hptb zafySaf2ikDy^rX*Cx^QqPBN;_qf>nO)(1pW}jz;hY z0z~8joDn!aK#CC+LpqsqiZBhZ>VfcFpcV3}!2xd!aYnCY;T2cu%Z3<36qBZ_{c#>z zA?$(FCi{*bGbS|zJ(>#6cg6RjBND=&$GZ+y`GA)8USuCtRibGEY>|Er9tl_%_sYM3 z8vh5-3rha-<+$C>I8U;QTg=__nq+2l?`s$ZmtC?O+Am{i9r>niS`V%1D3nRa@P4U* z0}oDIX$zSJ$Y0U;+>Un*>|R;^yBvn6D5yz`S)RG90Z8-i$)1r*%pKN|oA@QVn{D)D z*c&9p;yJrG7ztaK*>0U<(-YmI#`8@LCKma7(P=NyBD7PrnW{PI-k!JfImb7(^M&BI zRUX#is77y%{@y?M#`W z6fr($wa-8hC#Tw2 zP0?!DhCt*iRb!V)J;&MIJK!s^j)`{F2wm7t=aq}~sK311AB*#@va?MCKRLsU_n#l$ zum6&S=~ev)h=Z$({sXv6VXJ=rv>-5aWxVoTHfVSDs_8)JvDUbU%g5ixV>p8nx*^m> zJ7VhC$%qM)-PWXuMVml4ZbW0G5g^!kf{}Vq%`aseV}lK3i`AD+LZz4C58-37?sXIB z^Swb4L6h=fN~LgHS~&0H4MBtZ7+0p}sa|!E&GqiBapTygeKD(6>Rzcx{Il*0*U}E8 zzQ_q#ebjrH*lQFUf3K64VXp+=hM$I!Si+^*oh_biAhrktpP*x@TKT?tv(l)gU;my& z;v1LfHkT&YNNsV${o^tZ@d5L4>#dz&f-MCCdnqp&Cjv}OooR+nT-<+(3sKg8R;Du^ zf3L}Gphkg$OOMEWu0XvEG`6#_bWu3(er{5<4i?~J)mlC2RMBdxfFpWN88})mFpu~s z8rNit#Mz>rD{*yD>7?PsL5WAJY2(C%_k;{sN0_8RCLrW1FKrTmZjU5EC-eu~DyWv0 zV$3D8x%@!$0m~c0APfjkwuVYUwihOB$$Qvg2H}enXO6w?f3S z&61~noSXvSElwaIJy%2WyVHK8B-Z|&k3Hwxmy+i%!rnBF{{igTN4Ob|W8lvp9KZpe zrNh&gwL8&95yueVk0U@jz5s4Zl`S|k<<1Bl9B}Z3vF)U3vp~^P5ZQ)kQcyMDwUcVFh_JDGJpnB)$YeH3?4kx#Oa9zqB#L)( 
z&bbKI+8OIrn3jIgTvHhHdg7jOb^Xoz{rYCS^E|-_+?{3N;GZFgf@FQ^pKM_)g(qx$ zT}9ARlE*O48p&h;+CeuQE5fWQt`*hY$q+SHqy+$ZTJW}Yc*2C`9fj>$Lkn=Mzx~|u zTDO)0RR{FYckl)1jV1YIpT84-=}cd7j3Cbt_Qqw-#Z={TFQUNtl(ocdt(25)8u+bB z@!mPeNHgHwr_O5DJd(L0mjSD>p|81qrFqn*TxTgHvjb&U-9NVXI(o(%d(!6w)Q0T@ z@5IHas9X4DZS@TO?6gr^6k;764P1%ZGmkTqMQaz7Tr;2EC~J*Y3l^N3jqI(A^nO%; zo_R^UABcXDzt23J!w&c~@S@;4ggQjxMCs7;FvMlB{`lmdUuDOjb531S39x+=+bQvC z%d}tTMBBzXG;YhS)8*GzsdU~g7K`V)J|}G<{oF1+`jc{jL$BVC{T{c{D>UMxo_?Gv z1CGFYYzu`b{!;*`Lq)>B$^KZCr0JQ&n)6*=lHItytBRschR7LKlHg&7aZiy@iDAgl z$9nUMd6$Ui*IY zf4t0BH>FJFR#td#nZ9;0{JeV`9AomW+c9}Bc$B{r5$5u zl1ZAp{JR}Jmqmvap>o9>hR9>JKJSU)Avi9y!n5$GY02@+Ho7Pc3S5RWq{+EvZuiwO zRW0M-v~exEBmR#sry|#WLTD z$am6x=#;c^hnBh>Z{#idq64cZ!p08Q)uoRWN|Jy3i>PY%d}%X%!iigcRiP9F3y&De z+KCAK+)N3jD`k8g!}e~UPY>Cq$m{(qsJ|DY6Y-~tdMoeBebj556fypPo({Mk=zqUZZFo;@rGXY$j$=c^s1+L4 z7&wkQ@u7dp9L0)>==aeayf|+ z-}TpfB5@jI^29k)Ot1Jh+bcZ@wf_KEL0P67QE^=#UlnkQI4#P!^Wd^I%#$2edGQi= z9qSa-2bX5#=C7h9lUc@aY>bHIF6GI1!F2eZ zT?t~<<-!(WA-8PDUU%7?xcYN(axERP)m+6VwO}s(eq#z19e1D6Yg1FMIEmWJc+>dK zc<- znfda?U;_Q}3Z&<6bLow?LVbsXAeF{r+|f##vtEtGM4q0{e}M5QZW71&AAje(qFN&B z!Fp>NjZF<68XdKNPmL6SC~MYlR!0f#{v4*&BBfGY-1$#G;BkGEj(5~#t4?d03pP{~ zibP~EzKSZtod)Owr0iv-hhn;lvU^dAx~PnX#u>V0Kebhhjt|)xr_%F!Y5bQz*Rrmb zo0zYjSn|c^j9j!GvKfb!n$PS`*7n2i<2{CT$w~TiX_;wWkzu-Ap^d=d?zFSyZ}4pV z{@Cls)&eiX6_atSPts0+B&MD7{PdUq0O1l0j2EBc4;@%7w6ZhJi?KZq`FnMAKS~=e zTX$Xbtsz?8@kRHymw!UxQ+IpfESZ-58s#PnqIiE2e?P!Xci4dXZTvi1 zlCHawd4-~CL@k8G$<&;>Qo{$!Lyz_5e+&~vrfmKW_amnsyA4GD&|?11$t z-xNYk$4Nwm!%z;OWG_~K;j2UUvtR8N|H?i~BO~KAGhZ`fpB3AzhB0Vo?JQI0D5N%1 zXSmEk$t&WnK1Jjtuu631;^BTk4Elm-qm)LjEgevWE0Wd@i!zwa%BeP{KM!DYwCb}{ z#cCJxU)Z707xmYcQ3K#uRz?TE*A_2mECO2I9F;FFAoYqyBb{_?+QlPz8s~4e(qX1ivW4H|&f&{($i62?RD{Y3sL56oV5(%%nS2m{ z3TaDk{ZvJy(|8@xs6YxKy#XPFTmcC}1$l#YD&xO`frP;Y6nJ?0v$uxJTB+BvJU~$$ z{=M=LJ}(WkjYJZ@*6m2DBbN^ACp#9^!Qv(tzD=28qHdS@6P%55>Oqv>;zY7*`a>%F zwm5nIYKr9xzJ3wR?&HyWgzSIWeC;Xv=f6k(2e>S&+z+UXOY-NiYzU}=UBtY7c#0U$ zbH=>8HL%YhzC7#YzbiX{i-y92Jjd3b@#5 zaRy(fB$s~;z?0|Cv*9kbBw{P&v+zXn>8Ixi>xA|5gu-a^$_b-nwU=lc(~p?X8%AZB z%{6igYKA|8D0O>$9}`UL(fVXD9UqaF~lQ{P*kBmc5~<% zcjWO$rN`}KX( zz_OE$Yqd51uX|l`=iX|U_N|#-4c^*B6iSoes*zuyc3#~ohU=z?-p})!k&?Xr$BBBY z6Mk|lChpBdwsZ6BS2KliMq*~#ZLoD%E^o+b=Psme!gH96yi$`*PW~X;&(f(>BY@&< z(sbN#o$$pEgp0U0fk}Nys37diWt`KK+RBvu?e0%PiC95^7+q4qcKVe-l0KbV*51Sr zKT>UZJ0|FzGS@Oh;tPFQ+?k?o@uAE2wfVhahs#g!W;Z{K2zlraWe2;fvHMqJDeoA3 z2~~>Wj~&%5t(8rquJ$5HeiHA@Ij-C#6iCqpFCAsf^jDL&aUAs?w;U*?d^eOYpY*S| z)~4qKxMpa0H@p^%@IsLGnEO_IqUQ}2{QmVV}M9}^!*YTrUZq3=DrMl(C z<=O2wa>xCWi54MAh+zxTV4zS>A`#_zJ0o$O0yi!=&zn7uvgZ$KX-0r)QRPh8uYOcZ zgd{$h!NC=ff?^N$4yOgh{Jbr0evI0Rqqbw5G5N`yPfRe1Jb}{cR~YOd`A~3%at-fW zC2P6aw)Bd1pbe+9dP-qDktRK(Q+YTT{Ao(1eOLvo8Kx6vFoms1!R4uC4sBWUowEo2 z2SCU87@rZ0^$jWY2UD~D5`Cb#J{tUX8(<=`X3{i@&KD#i?vPUfZtg@|q5%IX(={L9 zQQK8OFr)XP0Xh945qh$h^LR01=u#W~5S3rFVd*yI?YN;KjTKL&^Y$h@-#BFz#d)!S zW5|j~cN*FTIS%e7$@|%;L@eFH;X5Ui;xrKV*pc!LHtj3S*ntomiRTLBl`THqFz@(Z zTeO~BB4qu1H<3v?8xhrJMW0w=F2>gnK&hX|@>sx@n)>+q0qJ~sAi3dQ5{Yycryys= z8(bRC#4oK~w~PN&g2oHz{1wiW;ES>pqzQxxfOj~?dk47n3eFoX%l(c4cLBV|g|fop zVYW*W=HRCiUdX3@bV`~=>{y0@2ZPGYJ`I+T)LRg~6A=kQXlNN+s)5L|6`RY@2BjbV ze!GrnwLGeA_FB$bqrsb490oSu&_x=H1@@s!}&~Wl_O!VC}7&ir{Uo z`BB&L{g3B}2>GIy&cH5K$Q-G&%agflOG)~Ki+M#GZ7omve7QR-gg)VSt(_%2JrH?m zzC4beICL2q1A-h1xYhZF6acgTva}KazX^0Nn~H}5d0+L2a!9cOY~!SxwDae~8Js?@ z-EPe*rBxO7Bs9_UPA41ldv;y@641VPNK+h}k}IhyTFBCPXiOmT1fG)j*y2Hhr=KayMnIoGcn$<2|xA^)x=U3=J zTr`@0=E0>}Bal%43%Us|wKE3&x{WO4o-F?sZ zA9+&gm0{e3Sk3YTPtfE}@1LAA4{~o?V+oRp6sfD@c!ewej6RPnk^SLE51~b`Hl2a< zlh%Sa!u<48HCdo;657rs^PRuf~1qHM?MEPnn{rT?Cd0`u(de*3iY&coFn;I 
[unreadable base85-encoded binary patch data omitted]
List["Response"]: ... @@ -55,6 +57,7 @@ class BaseEngine(ABC): messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> AsyncGenerator[str, None]: ... diff --git a/src/llmtuner/chat/chat_model.py b/src/llmtuner/chat/chat_model.py index c49d4d78..ba58dd2e 100644 --- a/src/llmtuner/chat/chat_model.py +++ b/src/llmtuner/chat/chat_model.py @@ -8,6 +8,8 @@ from .vllm_engine import VllmEngine if TYPE_CHECKING: + from numpy.typing import NDArray + from .base_engine import BaseEngine, Response @@ -36,9 +38,10 @@ class ChatModel: messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> List["Response"]: - task = asyncio.run_coroutine_threadsafe(self.achat(messages, system, tools, **input_kwargs), self._loop) + task = asyncio.run_coroutine_threadsafe(self.achat(messages, system, tools, image, **input_kwargs), self._loop) return task.result() async def achat( @@ -46,18 +49,20 @@ class ChatModel: messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> List["Response"]: - return await self.engine.chat(messages, system, tools, **input_kwargs) + return await self.engine.chat(messages, system, tools, image, **input_kwargs) def stream_chat( self, messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> Generator[str, None, None]: - generator = self.astream_chat(messages, system, tools, **input_kwargs) + generator = self.astream_chat(messages, system, tools, image, **input_kwargs) while True: try: task = asyncio.run_coroutine_threadsafe(generator.__anext__(), self._loop) @@ -70,9 +75,10 @@ class ChatModel: messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> AsyncGenerator[str, None]: - async for new_token in self.engine.stream_chat(messages, system, tools, **input_kwargs): + async for new_token in self.engine.stream_chat(messages, system, tools, image, **input_kwargs): yield new_token def
get_scores( diff --git a/src/llmtuner/chat/hf_engine.py b/src/llmtuner/chat/hf_engine.py index ddb48e47..f6f51898 100644 --- a/src/llmtuner/chat/hf_engine.py +++ b/src/llmtuner/chat/hf_engine.py @@ -14,7 +14,9 @@ from .base_engine import BaseEngine, Response if TYPE_CHECKING: - from transformers import PreTrainedModel, PreTrainedTokenizer + from numpy.typing import NDArray + from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin + from transformers.image_processing_utils import BaseImageProcessor from trl import PreTrainedModelWrapper from ..data import Template @@ -30,7 +32,9 @@ class HuggingfaceEngine(BaseEngine): generating_args: "GeneratingArguments", ) -> None: self.can_generate = finetuning_args.stage == "sft" - self.tokenizer = load_tokenizer(model_args) + tokenizer_module = load_tokenizer(model_args) + self.tokenizer = tokenizer_module["tokenizer"] + self.processor = tokenizer_module["processor"] self.tokenizer.padding_side = "left" if self.can_generate else "right" self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template) self.model = load_model( @@ -42,13 +46,18 @@ class HuggingfaceEngine(BaseEngine): def _process_args( model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], template: "Template", generating_args: Dict[str, Any], messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, input_kwargs: Optional[Dict[str, Any]] = {}, ) -> Tuple[Dict[str, Any], int]: + if processor is not None and image is not None and "&lt;image&gt;" not in messages[0]["content"]: + messages[0]["content"] = messages[0]["content"] + "&lt;image&gt;" + paired_messages = messages + [{"role": "assistant", "content": ""}] prompt_ids, _ = template.encode_oneturn( tokenizer=tokenizer, messages=paired_messages, system=system, tools=tools @@ -95,6 +104,11 @@ class HuggingfaceEngine(BaseEngine): logits_processor=get_logits_processor(), ) + if processor is not None and image is not None: + image_processor: "BaseImageProcessor" = getattr(processor, "image_processor") + pixel_values: "torch.Tensor" = image_processor(image, return_tensors="pt")["pixel_values"] + gen_kwargs["pixel_values"] = pixel_values.to(model.device) + return gen_kwargs, prompt_length @staticmethod @@ -102,15 +116,17 @@ class HuggingfaceEngine(BaseEngine): def _chat( model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], template: "Template", generating_args: Dict[str, Any], messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, input_kwargs: Optional[Dict[str, Any]] = {}, ) -> List["Response"]: gen_kwargs, prompt_length = HuggingfaceEngine._process_args( - model, tokenizer, template, generating_args, messages, system, tools, input_kwargs + model, tokenizer, processor, template, generating_args, messages, system, tools, image, input_kwargs ) generate_output = model.generate(**gen_kwargs) response_ids = generate_output[:, prompt_length:] @@ -135,15 +151,17 @@ class HuggingfaceEngine(BaseEngine): def _stream_chat( model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer", + processor: Optional["ProcessorMixin"], template: "Template", generating_args: Dict[str, Any], messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, input_kwargs: Optional[Dict[str, Any]] = {}, ) -> Callable[[], str]: gen_kwargs, _ =
HuggingfaceEngine._process_args( - model, tokenizer, template, generating_args, messages, system, tools, input_kwargs + model, tokenizer, processor, template, generating_args, messages, system, tools, image, input_kwargs ) streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) gen_kwargs["streamer"] = streamer @@ -199,6 +217,7 @@ class HuggingfaceEngine(BaseEngine): messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> List["Response"]: if not self.can_generate: @@ -208,11 +227,13 @@ class HuggingfaceEngine(BaseEngine): input_args = ( self.model, self.tokenizer, + self.processor, self.template, self.generating_args, messages, system, tools, + image, input_kwargs, ) async with self._semaphore: @@ -224,6 +245,7 @@ class HuggingfaceEngine(BaseEngine): messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> AsyncGenerator[str, None]: if not self.can_generate: @@ -233,11 +255,13 @@ class HuggingfaceEngine(BaseEngine): input_args = ( self.model, self.tokenizer, + self.processor, self.template, self.generating_args, messages, system, tools, + image, input_kwargs, ) async with self._semaphore: diff --git a/src/llmtuner/chat/vllm_engine.py b/src/llmtuner/chat/vllm_engine.py index 786e743d..a4caa53b 100644 --- a/src/llmtuner/chat/vllm_engine.py +++ b/src/llmtuner/chat/vllm_engine.py @@ -12,7 +12,10 @@ if is_vllm_available(): from vllm import AsyncEngineArgs, AsyncLLMEngine, RequestOutput, SamplingParams from vllm.lora.request import LoRARequest + if TYPE_CHECKING: + from numpy.typing import NDArray + from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments @@ -29,7 +32,9 @@ class VllmEngine(BaseEngine): infer_dtype = str(infer_dtype).split(".")[-1] self.can_generate = finetuning_args.stage == "sft" - self.tokenizer = load_tokenizer(model_args) + tokenizer_module = load_tokenizer(model_args) + self.tokenizer = tokenizer_module["tokenizer"] + self.processor = tokenizer_module["processor"] self.tokenizer.padding_side = "left" self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template) self.generating_args = generating_args.to_dict() @@ -58,6 +63,7 @@ class VllmEngine(BaseEngine): messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> AsyncIterator["RequestOutput"]: request_id = "chatcmpl-{}".format(uuid.uuid4().hex) @@ -121,10 +127,11 @@ class VllmEngine(BaseEngine): messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> List["Response"]: final_output = None - generator = await self._generate(messages, system, tools, **input_kwargs) + generator = await self._generate(messages, system, tools, image, **input_kwargs) async for request_output in generator: final_output = request_output @@ -146,10 +153,11 @@ class VllmEngine(BaseEngine): messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, + image: Optional["NDArray"] = None, **input_kwargs, ) -> AsyncGenerator[str, None]: generated_text = "" - generator = await self._generate(messages, system, tools, **input_kwargs) + generator = await self._generate(messages, system, tools, image, **input_kwargs) async for result in generator: delta_text = 
result.outputs[0].text[len(generated_text) :] generated_text = result.outputs[0].text diff --git a/src/llmtuner/data/preprocess.py b/src/llmtuner/data/preprocess.py index 0b467724..18681872 100644 --- a/src/llmtuner/data/preprocess.py +++ b/src/llmtuner/data/preprocess.py @@ -8,7 +8,7 @@ from .utils import Role if TYPE_CHECKING: - from PIL import Image + from PIL.Image import Image from transformers import ProcessorMixin, Seq2SeqTrainingArguments from transformers.image_processing_utils import BaseImageProcessor from transformers.tokenization_utils import PreTrainedTokenizer @@ -271,7 +271,11 @@ def get_preprocess_and_print_func( processor: Optional["ProcessorMixin"], ) -> Tuple[Callable, Callable]: if stage == "pt": - preprocess_func = partial(preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args) + preprocess_func = partial( + preprocess_pretrain_dataset, + tokenizer=tokenizer, + data_args=data_args, + ) print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer) elif stage == "sft" and not training_args.predict_with_generate: if data_args.packing: diff --git a/src/llmtuner/eval/evaluator.py b/src/llmtuner/eval/evaluator.py index 2c039928..7446c6f5 100644 --- a/src/llmtuner/eval/evaluator.py +++ b/src/llmtuner/eval/evaluator.py @@ -21,7 +21,7 @@ from .template import get_eval_template class Evaluator: def __init__(self, args: Optional[Dict[str, Any]] = None) -> None: self.model_args, self.data_args, self.eval_args, finetuning_args = get_eval_args(args) - self.tokenizer = load_tokenizer(self.model_args) + self.tokenizer = load_tokenizer(self.model_args)["tokenizer"] self.tokenizer.padding_side = "right" # avoid overflow issue in batched inference for llama2 self.template = get_template_and_fix_tokenizer(self.tokenizer, self.data_args.template) self.model = load_model(self.tokenizer, self.model_args, finetuning_args) diff --git a/src/llmtuner/hparams/parser.py b/src/llmtuner/hparams/parser.py index c922dc47..9b02c93b 100644 --- a/src/llmtuner/hparams/parser.py +++ b/src/llmtuner/hparams/parser.py @@ -196,6 +196,9 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS: if model_args.infer_backend == "vllm": raise ValueError("vLLM backend is only available for API, CLI and Web.") + if model_args.visual_inputs and data_args.packing: + raise ValueError("Cannot use packing in MLLM fine-tuning.") + _verify_model_args(model_args, finetuning_args) _check_extra_dependencies(model_args, finetuning_args, training_args) diff --git a/src/llmtuner/train/dpo/workflow.py b/src/llmtuner/train/dpo/workflow.py index 929dd029..b19a643e 100644 --- a/src/llmtuner/train/dpo/workflow.py +++ b/src/llmtuner/train/dpo/workflow.py @@ -24,8 +24,9 @@ def run_dpo( finetuning_args: "FinetuningArguments", callbacks: Optional[List["TrainerCallback"]] = None, ): - tokenizer = load_tokenizer(model_args) - dataset = get_dataset(tokenizer, model_args, data_args, training_args, stage="rm") + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + dataset = get_dataset(model_args, data_args, training_args, stage="rm", **tokenizer_module) model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) data_collator = PairwiseDataCollatorWithPadding( diff --git a/src/llmtuner/train/orpo/workflow.py b/src/llmtuner/train/orpo/workflow.py index 5a2fd36c..9c870096 100644 --- a/src/llmtuner/train/orpo/workflow.py +++ b/src/llmtuner/train/orpo/workflow.py @@ -24,8 +24,9 @@ def run_orpo( finetuning_args: "FinetuningArguments", 
callbacks: Optional[List["TrainerCallback"]] = None, ): - tokenizer = load_tokenizer(model_args) - dataset = get_dataset(tokenizer, model_args, data_args, training_args, stage="rm") + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + dataset = get_dataset(model_args, data_args, training_args, stage="rm", **tokenizer_module) model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) data_collator = PairwiseDataCollatorWithPadding( diff --git a/src/llmtuner/train/ppo/workflow.py b/src/llmtuner/train/ppo/workflow.py index d5854073..8cd15932 100644 --- a/src/llmtuner/train/ppo/workflow.py +++ b/src/llmtuner/train/ppo/workflow.py @@ -27,8 +27,9 @@ def run_ppo( generating_args: "GeneratingArguments", callbacks: Optional[List["TrainerCallback"]] = None, ): - tokenizer = load_tokenizer(model_args) - dataset = get_dataset(tokenizer, model_args, data_args, training_args, stage="ppo") + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + dataset = get_dataset(model_args, data_args, training_args, stage="ppo", **tokenizer_module) model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train, add_valuehead=True) tokenizer.padding_side = "left" # use left-padding in generation while using right-padding in training diff --git a/src/llmtuner/train/pt/workflow.py b/src/llmtuner/train/pt/workflow.py index f683f37a..3b127da4 100644 --- a/src/llmtuner/train/pt/workflow.py +++ b/src/llmtuner/train/pt/workflow.py @@ -25,8 +25,9 @@ def run_pt( finetuning_args: "FinetuningArguments", callbacks: Optional[List["TrainerCallback"]] = None, ): - tokenizer = load_tokenizer(model_args) - dataset = get_dataset(tokenizer, model_args, data_args, training_args, stage="pt") + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + dataset = get_dataset(model_args, data_args, training_args, stage="pt", **tokenizer_module) model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) diff --git a/src/llmtuner/train/rm/workflow.py b/src/llmtuner/train/rm/workflow.py index 42bf1ce6..bd0a756c 100644 --- a/src/llmtuner/train/rm/workflow.py +++ b/src/llmtuner/train/rm/workflow.py @@ -25,8 +25,9 @@ def run_rm( finetuning_args: "FinetuningArguments", callbacks: Optional[List["TrainerCallback"]] = None, ): - tokenizer = load_tokenizer(model_args) - dataset = get_dataset(tokenizer, model_args, data_args, training_args, stage="rm") + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + dataset = get_dataset(model_args, data_args, training_args, stage="rm", **tokenizer_module) model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train, add_valuehead=True) data_collator = PairwiseDataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) diff --git a/src/llmtuner/train/sft/workflow.py b/src/llmtuner/train/sft/workflow.py index 3ead9edf..4a9775b4 100644 --- a/src/llmtuner/train/sft/workflow.py +++ b/src/llmtuner/train/sft/workflow.py @@ -29,9 +29,9 @@ def run_sft( callbacks: Optional[List["TrainerCallback"]] = None, ): tokenizer_module = load_tokenizer(model_args) - dataset = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module) tokenizer = tokenizer_module["tokenizer"] - model = load_model(tokenizer, model_args, finetuning_args, is_trainable=training_args.do_train) + dataset = 
get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module) + model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) if training_args.predict_with_generate: tokenizer.padding_side = "left" # use left-padding in generation diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index a8a2b8e9..a2eb121f 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -52,7 +52,7 @@ def export_model(args: Optional[Dict[str, Any]] = None): if model_args.adapter_name_or_path is not None and model_args.export_quantization_bit is not None: raise ValueError("Please merge adapters before quantizing the model.") - tokenizer = load_tokenizer(model_args) + tokenizer = load_tokenizer(model_args)["tokenizer"] get_template_and_fix_tokenizer(tokenizer, data_args.template) model = load_model(tokenizer, model_args, finetuning_args) # must after fixing tokenizer to resize vocab diff --git a/src/llmtuner/train/utils.py b/src/llmtuner/train/utils.py index 27dc8eb3..d9fc363d 100644 --- a/src/llmtuner/train/utils.py +++ b/src/llmtuner/train/utils.py @@ -91,7 +91,7 @@ def create_ref_model( ) ref_model_args = ModelArguments(**ref_model_args_dict) ref_finetuning_args = FinetuningArguments(finetuning_type="lora") - tokenizer = load_tokenizer(ref_model_args) + tokenizer = load_tokenizer(ref_model_args)["tokenizer"] ref_model = load_model( tokenizer, ref_model_args, ref_finetuning_args, is_trainable=False, add_valuehead=add_valuehead ) @@ -100,7 +100,7 @@ def create_ref_model( if finetuning_args.finetuning_type == "lora": ref_model = None else: - tokenizer = load_tokenizer(model_args) + tokenizer = load_tokenizer(model_args)["tokenizer"] ref_model = load_model( tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=add_valuehead ) @@ -147,7 +147,7 @@ def create_reward_model( ) reward_model_args = ModelArguments(**reward_model_args_dict) reward_finetuning_args = FinetuningArguments(finetuning_type="lora") - tokenizer = load_tokenizer(reward_model_args) + tokenizer = load_tokenizer(reward_model_args)["tokenizer"] reward_model = load_model( tokenizer, reward_model_args, reward_finetuning_args, is_trainable=False, add_valuehead=True ) diff --git a/src/llmtuner/webui/chatter.py b/src/llmtuner/webui/chatter.py index 82e7b7f1..5aa8f563 100644 --- a/src/llmtuner/webui/chatter.py +++ b/src/llmtuner/webui/chatter.py @@ -2,6 +2,8 @@ import json import os from typing import TYPE_CHECKING, Dict, Generator, List, Optional, Sequence, Tuple +from numpy.typing import NDArray + from ..chat import ChatModel from ..data import Role from ..extras.misc import torch_gc @@ -112,6 +114,7 @@ class WebChatModel(ChatModel): messages: Sequence[Dict[str, str]], system: str, tools: str, + image: Optional[NDArray], max_new_tokens: int, top_p: float, temperature: float, @@ -119,7 +122,7 @@ class WebChatModel(ChatModel): chatbot[-1][1] = "" response = "" for new_text in self.stream_chat( - messages, system, tools, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature + messages, system, tools, image, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature ): response += new_text if tools: diff --git a/src/llmtuner/webui/components/chatbot.py b/src/llmtuner/webui/components/chatbot.py index 82bc4f29..e1be1f7b 100644 --- a/src/llmtuner/webui/components/chatbot.py +++ b/src/llmtuner/webui/components/chatbot.py @@ -23,9 +23,15 @@ def create_chat_box( messages = gr.State([]) with gr.Row(): with gr.Column(scale=4): - role = 
gr.Dropdown(choices=[Role.USER.value, Role.OBSERVATION.value], value=Role.USER.value) - system = gr.Textbox(show_label=False) - tools = gr.Textbox(show_label=False, lines=2) + with gr.Row(): + with gr.Column(): + role = gr.Dropdown(choices=[Role.USER.value, Role.OBSERVATION.value], value=Role.USER.value) + system = gr.Textbox(show_label=False) + tools = gr.Textbox(show_label=False, lines=4) + + with gr.Column(): + image = gr.Image(type="numpy") + query = gr.Textbox(show_label=False, lines=8) submit_btn = gr.Button(variant="primary") @@ -43,7 +49,7 @@ def create_chat_box( [chatbot, messages, query], ).then( engine.chatter.stream, - [chatbot, messages, system, tools, max_new_tokens, top_p, temperature], + [chatbot, messages, system, tools, image, max_new_tokens, top_p, temperature], [chatbot, messages], ) clear_btn.click(lambda: ([], []), outputs=[chatbot, messages]) @@ -56,6 +62,7 @@ def create_chat_box( role=role, system=system, tools=tools, + image=image, query=query, submit_btn=submit_btn, max_new_tokens=max_new_tokens, diff --git a/src/llmtuner/webui/locales.py b/src/llmtuner/webui/locales.py index 3af9128f..8e93efd6 100644 --- a/src/llmtuner/webui/locales.py +++ b/src/llmtuner/webui/locales.py @@ -1073,6 +1073,17 @@ LOCALES = { "placeholder": "工具列表(非必填)", }, }, + "image": { + "en": { + "label": "Image (optional)", + }, + "ru": { + "label": "Изображение (по желанию)", + }, + "zh": { + "label": "图像(非必填)", + }, + }, "query": { "en": { "placeholder": "Input...", From 27ba1b63ce22d6fc2198c517141051705b2e77f6 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 26 Apr 2024 05:44:30 +0800 Subject: [PATCH 34/34] update readme --- README.md | 5 ++++- README_zh.md | 5 ++++- scripts/cal_lr.py | 5 +++-- scripts/length_cdf.py | 4 ++-- src/llmtuner/hparams/parser.py | 3 +++ 5 files changed, 16 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 4e87e369..d74a3fb5 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,8 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ ## Changelog +[24/04/26] We supported fine-tuning the **LLaVA-1.5** multimodal LLMs. See `examples/lora_single_gpu/sft_mllm.sh` for usage. + [24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3-derived models fine-tuned using LLaMA Factory are available at Hugging Face, check [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details. [24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** according to [AstraMindAI's implementation](https://github.com/astramind-ai/Mixture-of-depths). See `examples/extras/mod` for usage. 
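Beyond the Web UI path, the same `image` argument is reachable from Python through `ChatModel`. The sketch below is illustrative and not part of this patch: the checkpoint id and the image path are placeholders, while `visual_inputs`, the `vicuna` template, and the `stream_chat(..., image=...)` signature come from the diffs in this series.

```python
# Illustrative sketch of multimodal inference with the new `image` argument.
# Placeholders: the checkpoint id and example.jpg; adjust to your own setup.
import numpy as np
from PIL import Image

from llmtuner.chat import ChatModel

chat_model = ChatModel(dict(
    model_name_or_path="llava-hf/llava-1.5-7b-hf",  # assumed LLaVA-1.5 checkpoint
    template="vicuna",                              # template listed for LLaVA-1.5
    visual_inputs=True,                             # enables the multimodal processor
))

image = np.asarray(Image.open("example.jpg").convert("RGB"))  # NDArray, as expected by the engines
messages = [{"role": "user", "content": "What is shown in this picture?"}]

for new_text in chat_model.stream_chat(messages, image=image):
    print(new_text, end="", flush=True)
```

The Web UI reaches the same code path through the new `gr.Image(type="numpy")` component wired into `engine.chatter.stream`.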
@@ -148,6 +150,7 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ | [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - | | [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 | | [LLaMA-3](https://huggingface.co/meta-llama) | 8B/70B | q_proj,v_proj | llama3 | +| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | q_proj,v_proj | vicuna | | [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | q_proj,v_proj | mistral | | [OLMo](https://huggingface.co/allenai) | 1B/7B | q_proj,v_proj | - | | [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | q_proj,v_proj | - | @@ -457,7 +460,7 @@ If you have a project that should be incorporated, please contact via email or c This repository is licensed under the [Apache-2.0 License](LICENSE). -Please follow the model licenses to use the corresponding model weights: [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command-R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [LLaMA-3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan) +Please follow the model licenses to use the corresponding model weights: [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command-R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2/LLaVA-1.5](https://ai.meta.com/llama/license/) / [LLaMA-3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / 
[XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan) ## Citation diff --git a/README_zh.md b/README_zh.md index 599af301..ed19ff94 100644 --- a/README_zh.md +++ b/README_zh.md @@ -68,6 +68,8 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd ## 更新日志 +[24/04/26] 我们支持了多模态模型 **LLaVA-1.5** 的微调。详细用法请参照 `examples/lora_single_gpu/sft_mllm.sh`。 + [24/04/22] 我们提供了在免费 T4 GPU 上微调 Llama-3 模型的 **[Colab 笔记本](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)**。Hugging Face 社区公开了两个利用 LLaMA Factory 微调的 Llama-3 模型,详情请见 [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) 和 [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese)。 [24/04/21] 我们基于 [AstraMindAI 的仓库](https://github.com/astramind-ai/Mixture-of-depths)支持了 **[混合深度训练](https://arxiv.org/abs/2404.02258)**。详细用法请参照 `examples/extras/mod`。 @@ -148,6 +150,7 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd | [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - | | [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 | | [LLaMA-3](https://huggingface.co/meta-llama) | 8B/70B | q_proj,v_proj | llama3 | +| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | q_proj,v_proj | vicuna | | [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | q_proj,v_proj | mistral | | [OLMo](https://huggingface.co/allenai) | 1B/7B | q_proj,v_proj | - | | [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | q_proj,v_proj | - | @@ -457,7 +460,7 @@ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1` 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。 -使用模型权重时,请遵循对应的模型协议:[Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command-R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [LLaMA-3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan) +使用模型权重时,请遵循对应的模型协议:[Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / 
[Command-R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2/LLaVA-1.5](https://ai.meta.com/llama/license/) / [LLaMA-3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan) ## 引用 diff --git a/scripts/cal_lr.py b/scripts/cal_lr.py index ffe47f28..c1c1f7a2 100644 --- a/scripts/cal_lr.py +++ b/scripts/cal_lr.py @@ -44,8 +44,9 @@ def calculate_lr( overwrite_cache=True, ) ) - tokenizer = load_tokenizer(model_args) - trainset = get_dataset(tokenizer, model_args, data_args, training_args, stage) + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + trainset = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module) if stage == "pt": data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) elif stage == "sft": diff --git a/scripts/length_cdf.py b/scripts/length_cdf.py index cf0698de..1446f77a 100644 --- a/scripts/length_cdf.py +++ b/scripts/length_cdf.py @@ -32,8 +32,8 @@ def length_cdf( overwrite_cache=True, ) ) - tokenizer = load_tokenizer(model_args) - trainset = get_dataset(tokenizer, model_args, data_args, training_args, stage="sft") + tokenizer_module = load_tokenizer(model_args) + trainset = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module) total_num = len(trainset) length_dict = defaultdict(int) for sample in tqdm(trainset["input_ids"]): diff --git a/src/llmtuner/hparams/parser.py b/src/llmtuner/hparams/parser.py index 9b02c93b..715b8f95 100644 --- a/src/llmtuner/hparams/parser.py +++ b/src/llmtuner/hparams/parser.py @@ -320,6 +320,9 @@ def get_infer_args(args: Optional[Dict[str, Any]] = None) -> _INFER_CLS: if model_args.adapter_name_or_path is not None and len(model_args.adapter_name_or_path) != 1: raise ValueError("vLLM only accepts a single adapter. Merge them first.") + if model_args.visual_inputs: + raise ValueError("vLLM engine does not support MLLM yet. Stay tuned.") + _verify_model_args(model_args, finetuning_args) _check_extra_dependencies(model_args, finetuning_args)
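A change that recurs across these diffs is the return contract of `load_tokenizer`: it now yields a dict containing both the tokenizer and an optional multimodal processor, and dataset builders receive the whole module via `**tokenizer_module`. The snippet below sketches that contract for a downstream caller; it is not code from the patch, and the checkpoint id is a placeholder.

```python
# Sketch of the new load_tokenizer contract (not part of the patch itself).
from llmtuner.hparams import ModelArguments
from llmtuner.model import load_tokenizer

# Placeholder arguments; visual_inputs controls whether a processor is loaded.
model_args = ModelArguments(model_name_or_path="llava-hf/llava-1.5-7b-hf", visual_inputs=True)

tokenizer_module = load_tokenizer(model_args)      # {"tokenizer": ..., "processor": ...}
tokenizer = tokenizer_module["tokenizer"]
processor = tokenizer_module["processor"]          # None for text-only models

# Workflows and scripts now forward both objects to the dataset pipeline, e.g.:
# dataset = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module)
```

The new parser checks above then reject the configurations that `visual_inputs` cannot support yet, namely sequence packing during fine-tuning and the vLLM inference backend.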