add unittest

parent 779aae83d2
commit 608de799a2
@@ -44,7 +44,7 @@
{
"messages": [
{
"content": "How would you identify potential customers for a new product?\n",
"content": "How would you identify potential customers for a new product?",
"role": "user"
},
{
@@ -204,7 +204,7 @@
{
"messages": [
{
"content": "Claim: \"The Prime Minister of the United Kingdom does not preside over England and other countries.\"\nIs the claim above correct, and can it be verified by human common sense and without a web search?\nOptions:\n- yes\n- no\nyes\nEngland is a country in the UK over which the Prime Minister of the United Kingdom has authority.\n\n\n**Q**\nDoes the following sentence make sense?\n\"People have not highly prized Edvard Munch work.\"\nOptions:\n- yes\n- no\n**A**\nno\nEdvard Munch's paintings have sold for millions of dollars.\n\n\nYes / no, is the following a verifiable fact?\n\"The Invisible Man is Jesus H. Christ in the flesh.\"\nno\nIt is an outlandish statement and in no way plausible.\n\n\n**Q**\nIs the following a factual statement?\n\"I picked up the Peach and talked into it, but couldnt hear my mother who just called.\"\nOptions:\n- yes\n- no\n**A**\nno\nA peach is not a phone, but a fruit instead.\n\n\nClaim: \"If a person gets a Snakebite, it does not need to be treated.\"\nIs the claim above correct, and can it be verified by human common sense and without a web search?\nOptions:\n- yes\n- no\nno\nSnakebites can be fatal if left untreated and should always be checked.\n\n\nIs the following statement true?\n\"A Bengal fox can thrive on only a plant-based diet.\"\n",
"content": "Claim: \"The Prime Minister of the United Kingdom does not preside over England and other countries.\"\nIs the claim above correct, and can it be verified by human common sense and without a web search?\nOptions:\n- yes\n- no\nyes\nEngland is a country in the UK over which the Prime Minister of the United Kingdom has authority.\n\n\n**Q**\nDoes the following sentence make sense?\n\"People have not highly prized Edvard Munch work.\"\nOptions:\n- yes\n- no\n**A**\nno\nEdvard Munch's paintings have sold for millions of dollars.\n\n\nYes / no, is the following a verifiable fact?\n\"The Invisible Man is Jesus H. Christ in the flesh.\"\nno\nIt is an outlandish statement and in no way plausible.\n\n\n**Q**\nIs the following a factual statement?\n\"I picked up the Peach and talked into it, but couldnt hear my mother who just called.\"\nOptions:\n- yes\n- no\n**A**\nno\nA peach is not a phone, but a fruit instead.\n\n\nClaim: \"If a person gets a Snakebite, it does not need to be treated.\"\nIs the claim above correct, and can it be verified by human common sense and without a web search?\nOptions:\n- yes\n- no\nno\nSnakebites can be fatal if left untreated and should always be checked.\n\n\nIs the following statement true?\n\"A Bengal fox can thrive on only a plant-based diet.\"",
"role": "user"
},
{
@@ -269,7 +269,7 @@
{
"messages": [
{
"content": "If a car uses 8 gallons of gasoline to travel 240 miles, what is its fuel efficiency in miles per gallon (mpg)?\n",
"content": "If a car uses 8 gallons of gasoline to travel 240 miles, what is its fuel efficiency in miles per gallon (mpg)?",
"role": "user"
},
{
@@ -311,7 +311,7 @@
{
"messages": [
{
"content": "Explain what a decision tree is.\n",
"content": "Explain what a decision tree is.",
"role": "user"
},
{
@@ -423,7 +423,7 @@
"role": "user"
},
{
"content": "In step 7, we will implement the apply root motion logic on the client side after receiving it through the `Client_ReceiveRootMotion` function.\n\n1. Apply Root Motion to Clients:\n\nHere's the code to complete step 7:\n\n```cpp\nvoid UMyCharacterMovementComponent::Client_ReceiveRootMotion_Implementation(APlayerController* TargetPlayerController, const FTransform& RootMotion)\n{\n // Retrieve the target player's pawn and character controller\n ACharacter* TargetCharacter = Cast<ACharacter>(TargetPlayerController->GetPawn());\n UInputDispatch:\\\n",
"content": "In step 7, we will implement the apply root motion logic on the client side after receiving it through the `Client_ReceiveRootMotion` function.\n\n1. Apply Root Motion to Clients:\n\nHere's the code to complete step 7:\n\n```cpp\nvoid UMyCharacterMovementComponent::Client_ReceiveRootMotion_Implementation(APlayerController* TargetPlayerController, const FTransform& RootMotion)\n{\n // Retrieve the target player's pawn and character controller\n ACharacter* TargetCharacter = Cast<ACharacter>(TargetPlayerController->GetPawn());\n UInputDispatch:",
"role": "assistant"
}
],
@@ -536,7 +536,7 @@
{
"messages": [
{
"content": "What gets wetter as it dries?\n",
"content": "What gets wetter as it dries?",
"role": "user"
},
{
@@ -630,7 +630,7 @@
{
"messages": [
{
"content": "To what type of sentence does the word \"once\" belong?\n",
"content": "To what type of sentence does the word \"once\" belong?",
"role": "user"
},
{
@@ -1215,7 +1215,7 @@
{
"messages": [
{
"content": "Find the right ending to this passage.\n\n(Mashable) -- Google has quietly launched its own full-length online magazine, a quarterly publication whose aim is to create a \"breathing space in a busy world.\" The first edition of Think Quarterly, based out of the UK, is a 68-page dive into the world of data and its impact on business. The first thing most people will notice is that it's a visually stunning piece of work. It's a rich Flash app with Google's quirky sensibilities and the in-depth writing you might find in BusinessWeek or Salon. Google's quarterly magazine is edited and designed by creative agency The Church of London.\n\nMany of\n\nOPTIONS:\n- BusinessWeek or Salon's articles feature interviews with Google executives and technology leaders.\n- Flash's articles feature interviews with Google executives and technology leaders.\n- Google's articles feature interviews with Google executives and technology leaders.\n- London's articles feature interviews with Google executives and technology leaders.\n- Mashable's articles feature interviews with Google executives and technology leaders.\n- Quarterly's articles feature interviews with Google executives and technology leaders.\n- The Church's articles feature interviews with Google executives and technology leaders.\n- Think Quarterly's articles feature interviews with Google executives and technology leaders.\n- UK's articles feature interviews with Google executives and technology leaders.\n\n",
"content": "Find the right ending to this passage.\n\n(Mashable) -- Google has quietly launched its own full-length online magazine, a quarterly publication whose aim is to create a \"breathing space in a busy world.\" The first edition of Think Quarterly, based out of the UK, is a 68-page dive into the world of data and its impact on business. The first thing most people will notice is that it's a visually stunning piece of work. It's a rich Flash app with Google's quirky sensibilities and the in-depth writing you might find in BusinessWeek or Salon. Google's quarterly magazine is edited and designed by creative agency The Church of London.\n\nMany of\n\nOPTIONS:\n- BusinessWeek or Salon's articles feature interviews with Google executives and technology leaders.\n- Flash's articles feature interviews with Google executives and technology leaders.\n- Google's articles feature interviews with Google executives and technology leaders.\n- London's articles feature interviews with Google executives and technology leaders.\n- Mashable's articles feature interviews with Google executives and technology leaders.\n- Quarterly's articles feature interviews with Google executives and technology leaders.\n- The Church's articles feature interviews with Google executives and technology leaders.\n- Think Quarterly's articles feature interviews with Google executives and technology leaders.\n- UK's articles feature interviews with Google executives and technology leaders.",
"role": "user"
},
{
@@ -1261,7 +1261,7 @@
"role": "user"
},
{
"content": " Artificial Intelligence, or AI, refers to the development of intelligent or intelligent-like systems, which can perform tasks that would typically require human-level intelligence, such as problem-solving, critical thinking, learning, and pattern recognition. These systems are designed to operate autonomously, adapt, or improve upon their performance, typically through the use of data analysis, machine learning, natural language processing, and other computational techniques.\n\nOn the other hand, a generic algorithm is a high-level, flexible, and commonly used method to solve a wide range of problems without the need to be tailored to specific problem instances or domains. Generic algorithms are typically used for solving broad problem classes, like optimization or search problems, where specific details of the problem can vary but the underlying computational logic remains the same. For example, the most common generic algorithm, called the \"divide and conquer\" strategy, is used to efficiently solve various optimization and search problems by recursively partitioning the problem space into smaller subproblems, solving these subproblems independently, and then combining their solutions to form a complete solution.\n\nThe distinction between AI and generic algorithms lies in their scope, purpose, and applicability. AI attempts to build systems that can simulate or contain cognitive abilities, while generic algorithms are more focused on providing a general and efficient method to solve various problems, without necessarily emulating or reproducing human cognitive abilities. In other words, AI represents a broader effort to develop intelligent systems, while generic algorithms are specific problem-solving methods that can be applied to a wide range of problems but may not have the capability or goal of imitating human intelligence.\n\nThat being said, some generic algorithms might be incorporating elements of AI or machine learning into their design, as developments in AI often create new and powerful optimizations to solve complex problems. In that context, algorithms can be considered as building blocks for AI, with machine learning algorithms, for instance, using generic strategies like gradient descent or reinforcement learning to adapt their decision-making processes based on experience, i.e., using data to improve their performance over time.\n\nIn summary, AI is the development of intelligent systems that emulate or simulate human cognitive tasks, while generic algorithms are broad, flexible problem-solving methods that can be applied across various problem classes without the necessity to reproduce intelligence. Although some generic algorithms leverage AI techniques to enhance their performance, the former focuses on problem-solving, while the latter concentrates on building systems with cognitive capabilities.\n",
"content": "Artificial Intelligence, or AI, refers to the development of intelligent or intelligent-like systems, which can perform tasks that would typically require human-level intelligence, such as problem-solving, critical thinking, learning, and pattern recognition. These systems are designed to operate autonomously, adapt, or improve upon their performance, typically through the use of data analysis, machine learning, natural language processing, and other computational techniques.\n\nOn the other hand, a generic algorithm is a high-level, flexible, and commonly used method to solve a wide range of problems without the need to be tailored to specific problem instances or domains. Generic algorithms are typically used for solving broad problem classes, like optimization or search problems, where specific details of the problem can vary but the underlying computational logic remains the same. For example, the most common generic algorithm, called the \"divide and conquer\" strategy, is used to efficiently solve various optimization and search problems by recursively partitioning the problem space into smaller subproblems, solving these subproblems independently, and then combining their solutions to form a complete solution.\n\nThe distinction between AI and generic algorithms lies in their scope, purpose, and applicability. AI attempts to build systems that can simulate or contain cognitive abilities, while generic algorithms are more focused on providing a general and efficient method to solve various problems, without necessarily emulating or reproducing human cognitive abilities. In other words, AI represents a broader effort to develop intelligent systems, while generic algorithms are specific problem-solving methods that can be applied to a wide range of problems but may not have the capability or goal of imitating human intelligence.\n\nThat being said, some generic algorithms might be incorporating elements of AI or machine learning into their design, as developments in AI often create new and powerful optimizations to solve complex problems. In that context, algorithms can be considered as building blocks for AI, with machine learning algorithms, for instance, using generic strategies like gradient descent or reinforcement learning to adapt their decision-making processes based on experience, i.e., using data to improve their performance over time.\n\nIn summary, AI is the development of intelligent systems that emulate or simulate human cognitive tasks, while generic algorithms are broad, flexible problem-solving methods that can be applied across various problem classes without the necessity to reproduce intelligence. Although some generic algorithms leverage AI techniques to enhance their performance, the former focuses on problem-solving, while the latter concentrates on building systems with cognitive capabilities.",
"role": "assistant"
}
],
@@ -1435,7 +1435,7 @@
{
"messages": [
{
"content": "Rewrite each original sentence in order to make it easier to understand by non-native speakers of English. You can do so by replacing complex words with simpler synonyms (i.e. paraphrasing), deleting unimportant information (i.e. compression), and/or splitting a long complex sentence into several simpler ones. The final simplified sentences need to be grammatical, fluent, and retain the main ideas of their original counterparts without altering their meanings.\n\nEx Input:\nIn 1887, was reorganized into a stock company.\n\nEx Output:\nIn 1887 it became a stock company.\n\n\nEx Input:\nBedroom bondage games often constitute foreplay.\n\nEx Output:\nBedroom bondage games are often foreplay.\n\n\nEx Input:\nThe petitions at the end of the hymn (beginning Salvum fac populum tuum) are a selection of verses from the book of Psalms, appended subsequently to the original hymn.\n\nEx Output:\n",
"content": "Rewrite each original sentence in order to make it easier to understand by non-native speakers of English. You can do so by replacing complex words with simpler synonyms (i.e. paraphrasing), deleting unimportant information (i.e. compression), and/or splitting a long complex sentence into several simpler ones. The final simplified sentences need to be grammatical, fluent, and retain the main ideas of their original counterparts without altering their meanings.\n\nEx Input:\nIn 1887, was reorganized into a stock company.\n\nEx Output:\nIn 1887 it became a stock company.\n\n\nEx Input:\nBedroom bondage games often constitute foreplay.\n\nEx Output:\nBedroom bondage games are often foreplay.\n\n\nEx Input:\nThe petitions at the end of the hymn (beginning Salvum fac populum tuum) are a selection of verses from the book of Psalms, appended subsequently to the original hymn.\n\nEx Output:",
"role": "user"
},
{
@@ -1500,7 +1500,7 @@
{
"messages": [
{
"content": "Given the task definition and input, reply with output. Given a premise and two alternatives in Hindi, choose the alternative that is either a plausible cause or effect of the situation described by the premise. The premise is the 'कथन' field and the alternatives are the 'विकल्प A' and 'विकल्प B' fields. The output should either be \"विकल्प A\" or \"विकल्प B\" based on your judgment.\n\nकथन: मैं एक नए शहर में चला गया।\n\n विकल्प A: मुझे एक बेहतर भुगतान वाली नौकरी मिली।\n\n विकल्प B: मेरा बेटा पैदा हुआ।\n",
"content": "Given the task definition and input, reply with output. Given a premise and two alternatives in Hindi, choose the alternative that is either a plausible cause or effect of the situation described by the premise. The premise is the 'कथन' field and the alternatives are the 'विकल्प A' and 'विकल्प B' fields. The output should either be \"विकल्प A\" or \"विकल्प B\" based on your judgment.\n\nकथन: मैं एक नए शहर में चला गया।\n\n विकल्प A: मुझे एक बेहतर भुगतान वाली नौकरी मिली।\n\n विकल्प B: मेरा बेटा पैदा हुआ।",
"role": "user"
},
{
@@ -1552,7 +1552,7 @@
{
"messages": [
{
"content": "OK. You'll be given the following question. Please do chain-of-thought reasoning.\nWhich of these sentences doesn't make sense?\nOptions:\n- Sentence A: \"We used our tax income to help foreign poor people\"\n- Sentence B: \"We used our tax income to help local poor people\"\n",
"content": "OK. You'll be given the following question. Please do chain-of-thought reasoning.\nWhich of these sentences doesn't make sense?\nOptions:\n- Sentence A: \"We used our tax income to help foreign poor people\"\n- Sentence B: \"We used our tax income to help local poor people\"",
"role": "user"
},
{
@@ -1780,7 +1780,7 @@
{
"messages": [
{
"content": "The provided file includes inquiries about restaurants, and we ask you to translate those to the Japanese language. Please bear in mind the following guidlines while doing the translation: 1) We are looking for the most naturally written and formal form of each sentence in your language. We are *NOT* looking for colloquial forms of the sentence. We are looking for formal form which is how you would type your queries in a text-based virtual assistant. 2) The words between quotation marks *SHOULD NOT* be translated. We expect you to keep those values intact and include the quotation marks around them as well. 3) The fully capitalized words like DATE_0, or DURATION_0 *SHOULD NOT* be translated. Please keep them as they are in the translations. 4) Please do not localize measurement units like miles to kilometers during your translation. miles should be translated to its equivalent in your language. 6) Note the input is all lowercased except for special placeholders. Please do the same in your translations.\n\nwhere are the best \" fast food \" joints in my area .\n周辺で最高の\" fast food \"レストランは?\n\nsearch for \" steakhouse \" restaurants in my city\n市内の\" steakhouse \"レストランを検索して。\n\nsearch for restaurants in \" reno , nevada \" .\n",
"content": "The provided file includes inquiries about restaurants, and we ask you to translate those to the Japanese language. Please bear in mind the following guidlines while doing the translation: 1) We are looking for the most naturally written and formal form of each sentence in your language. We are *NOT* looking for colloquial forms of the sentence. We are looking for formal form which is how you would type your queries in a text-based virtual assistant. 2) The words between quotation marks *SHOULD NOT* be translated. We expect you to keep those values intact and include the quotation marks around them as well. 3) The fully capitalized words like DATE_0, or DURATION_0 *SHOULD NOT* be translated. Please keep them as they are in the translations. 4) Please do not localize measurement units like miles to kilometers during your translation. miles should be translated to its equivalent in your language. 6) Note the input is all lowercased except for special placeholders. Please do the same in your translations.\n\nwhere are the best \" fast food \" joints in my area .\n周辺で最高の\" fast food \"レストランは?\n\nsearch for \" steakhouse \" restaurants in my city\n市内の\" steakhouse \"レストランを検索して。\n\nsearch for restaurants in \" reno , nevada \" .",
"role": "user"
},
{
@@ -1832,7 +1832,7 @@
{
"messages": [
{
"content": "To what type of sentence does the word \"once\" belong?\n",
"content": "To what type of sentence does the word \"once\" belong?",
"role": "user"
},
{
@@ -1861,11 +1861,11 @@
{
"messages": [
{
"content": "What children's book character lives in the Hundred Acre Wood with friends like Piglet, Tigger, and Eeyore?\n\n",
"content": "What children's book character lives in the Hundred Acre Wood with friends like Piglet, Tigger, and Eeyore?",
"role": "user"
},
{
"content": "Winnie the Pooh\n\n",
"content": "Winnie the Pooh",
"role": "assistant"
},
{
@@ -1968,7 +1968,7 @@
{
"messages": [
{
"content": "Then we came back to the hotel for some time out before dinner . For dinner we went to Markus ir Ko - supposedly Lithuanian style food . I had roast veal with chanterelles ( orange coloured mushrooms we ' ve seen since Finland ) . David had an English pepper steak . The meal was good but expensive .\n\n\n A). They are on vacation.\n B). They are not on vacation.\n C). None of the above choices ..\n D). They are a vegan.\nAnswer the following question: What is probably true about the narrator ?\n",
"content": "Then we came back to the hotel for some time out before dinner . For dinner we went to Markus ir Ko - supposedly Lithuanian style food . I had roast veal with chanterelles ( orange coloured mushrooms we ' ve seen since Finland ) . David had an English pepper steak . The meal was good but expensive .\n\n\n A). They are on vacation.\n B). They are not on vacation.\n C). None of the above choices ..\n D). They are a vegan.\nAnswer the following question: What is probably true about the narrator ?",
"role": "user"
},
{
@@ -2162,7 +2162,7 @@
{
"messages": [
{
"content": "Given the task definition and input, reply with output. You are given a sentence in Portuguese. Your job is to translate the Portuguese sentence into Spanish.\n\nE, em parte, isto já está a acontecer.\n",
"content": "Given the task definition and input, reply with output. You are given a sentence in Portuguese. Your job is to translate the Portuguese sentence into Spanish.\n\nE, em parte, isto já está a acontecer.",
"role": "user"
},
{
@@ -2468,7 +2468,7 @@
{
"messages": [
{
"content": "Find the derivative of the function f(x) = 3x^2 - 4x + 2. Explain your calculations.\n",
"content": "Find the derivative of the function f(x) = 3x^2 - 4x + 2. Explain your calculations.",
"role": "user"
},
{
@@ -2652,7 +2652,7 @@
{
"messages": [
{
"content": "Categorize the words \"whale,\" \"dolphin,\" \"shark,\" \"clam,\" and \"crab\" according to their biological classification.\n",
"content": "Categorize the words \"whale,\" \"dolphin,\" \"shark,\" \"clam,\" and \"crab\" according to their biological classification.",
"role": "user"
},
{
@@ -2694,7 +2694,7 @@
{
"messages": [
{
"content": "Translate to French:\n\nThe Working Party took note of the request from the Inland Transport Committee to include the facilitation of transport of disabled people in its programme of work and noted that this issue was already covered in some of its instruments, such as the 1986 resolution No. 25, which contains Guidelines for Passenger Vessels also suited for carrying Disabled Persons and, more recently, resolution No. 61.\n\n",
"content": "Translate to French:\n\nThe Working Party took note of the request from the Inland Transport Committee to include the facilitation of transport of disabled people in its programme of work and noted that this issue was already covered in some of its instruments, such as the 1986 resolution No. 25, which contains Guidelines for Passenger Vessels also suited for carrying Disabled Persons and, more recently, resolution No. 61.",
"role": "user"
},
{
@@ -2766,7 +2766,7 @@
"role": "user"
},
{
"content": " As an Azure Cloud Engineer working with Microsoft Azure, you can indeed utilize Azure Active Directory (Azure AD) for managing data access to databases, instead of relying solely on Shared Access Signatures (SAS tokens). Here's a detailed explanation on how to accomplish this, as well as information on why Azure AD might be more advantageous for certain use cases.\n\n1. Azure Active Directory overview:\nAzure Active Directory is Microsoft's cloud-based identity and access management service. It allows administrators to manage user identities, group memberships, and permissions for accessing resources in various cloud-based services, including Azure databases like Azure SQL Database and Azure Cosmos DB.\n\n2. Advantages of using Azure AD over SAS tokens:\n - Granular access control: Azure AD supports role-based access control (RBAC) and attribute-based access control (ABAC). It allows you to grant precise permissions to different users or groups, increasing the overall security of your resources.\n - Simplified identity management: Integrating Azure AD eliminates the need to manage separate access tokens. Centralized user management on the Azure portal makes authentication easier and ensures a seamless user experience.\n - Auditing and compliance: Azure AD offers built-in auditing and reporting features, which can monitor and track access to resources, helping you maintain compliance with industry regulations.\n - SAML-based integration: You can integrate Azure AD with on-premise and third-party identity providers via Security Assertion Markup Language (SAML) based authentication. This allows users to access Azure resources using their existing enterprise identity and enables seamless single sign-on (SSO) experience.\n\n3. Setting up Azure AD integration with Azure databases:\n - For Azure SQL Database and Azure Cosmos DB, you can follow the provided guides to configure access using Azure AD authentication:\n - Azure SQL Database: https://docs.microsoft.com/en-us/azure/sql-database/sql-database-aad-integration\n - Azure Cosmos DB: https://docs.microsoft.com/en-us/azure/cosmos-db/configure-კlineto-authentication#azure-active-directory-authentication\n\n - When configuring Azure AD authentication, choose the appropriate RBAC permissions and create Active Directory groups to assign roles that control access to databases or resources within them.\n - You may still use SAS tokens complementary to Azure AD, as there might be scenarios where using SAS tokens is more appropriate, such as granting temporary access to non-Azure AD users or external services.\n\n4. Deciding when to use SAS tokens or Azure AD:\n - When access to resources is time-limited or shared with non-Azure AD users, SAS tokens might be more appropriate.\n - For user identities within your organization, Azure AD offers a more streamlined approach for identity and access management across various Azure resources, making it an excellent choice for long-term access control.\n\nIn summary, it is possible to use Azure AD in place of SAS tokens to manage data access to Azure databases. Azure AD provides several advantages, such as improved access control, simplified identity management, and auditing capabilities. By following the specified steps to integrate Azure AD authentication with Azure SQL Database and Azure Cosmos DB, you can take advantage of these benefits for secure and efficient access management for your organization's cloud resources.\n",
"content": "As an Azure Cloud Engineer working with Microsoft Azure, you can indeed utilize Azure Active Directory (Azure AD) for managing data access to databases, instead of relying solely on Shared Access Signatures (SAS tokens). Here's a detailed explanation on how to accomplish this, as well as information on why Azure AD might be more advantageous for certain use cases.\n\n1. Azure Active Directory overview:\nAzure Active Directory is Microsoft's cloud-based identity and access management service. It allows administrators to manage user identities, group memberships, and permissions for accessing resources in various cloud-based services, including Azure databases like Azure SQL Database and Azure Cosmos DB.\n\n2. Advantages of using Azure AD over SAS tokens:\n - Granular access control: Azure AD supports role-based access control (RBAC) and attribute-based access control (ABAC). It allows you to grant precise permissions to different users or groups, increasing the overall security of your resources.\n - Simplified identity management: Integrating Azure AD eliminates the need to manage separate access tokens. Centralized user management on the Azure portal makes authentication easier and ensures a seamless user experience.\n - Auditing and compliance: Azure AD offers built-in auditing and reporting features, which can monitor and track access to resources, helping you maintain compliance with industry regulations.\n - SAML-based integration: You can integrate Azure AD with on-premise and third-party identity providers via Security Assertion Markup Language (SAML) based authentication. This allows users to access Azure resources using their existing enterprise identity and enables seamless single sign-on (SSO) experience.\n\n3. Setting up Azure AD integration with Azure databases:\n - For Azure SQL Database and Azure Cosmos DB, you can follow the provided guides to configure access using Azure AD authentication:\n - Azure SQL Database: https://docs.microsoft.com/en-us/azure/sql-database/sql-database-aad-integration\n - Azure Cosmos DB: https://docs.microsoft.com/en-us/azure/cosmos-db/configure-კlineto-authentication#azure-active-directory-authentication\n\n - When configuring Azure AD authentication, choose the appropriate RBAC permissions and create Active Directory groups to assign roles that control access to databases or resources within them.\n - You may still use SAS tokens complementary to Azure AD, as there might be scenarios where using SAS tokens is more appropriate, such as granting temporary access to non-Azure AD users or external services.\n\n4. Deciding when to use SAS tokens or Azure AD:\n - When access to resources is time-limited or shared with non-Azure AD users, SAS tokens might be more appropriate.\n - For user identities within your organization, Azure AD offers a more streamlined approach for identity and access management across various Azure resources, making it an excellent choice for long-term access control.\n\nIn summary, it is possible to use Azure AD in place of SAS tokens to manage data access to Azure databases. Azure AD provides several advantages, such as improved access control, simplified identity management, and auditing capabilities. By following the specified steps to integrate Azure AD authentication with Azure SQL Database and Azure Cosmos DB, you can take advantage of these benefits for secure and efficient access management for your organization's cloud resources.",
"role": "assistant"
}
],
@@ -2817,7 +2817,7 @@
{
"messages": [
{
"content": "Title: Never get to full charge Review: The battery is aways charging, never stops, no matter how long it's been connected. When you disconnect it, the screen indicator always shows half charge. I've wrote to Amazon's Costumer Service and they asked me to send the item back to get a replacement. I didn't send it because that'll mean having my ipod unuseful for at least 2 weeks. Is the review positive or negative?\n",
"content": "Title: Never get to full charge Review: The battery is aways charging, never stops, no matter how long it's been connected. When you disconnect it, the screen indicator always shows half charge. I've wrote to Amazon's Costumer Service and they asked me to send the item back to get a replacement. I didn't send it because that'll mean having my ipod unuseful for at least 2 weeks. Is the review positive or negative?",
"role": "user"
},
{
@@ -2898,7 +2898,7 @@
{
"messages": [
{
"content": "Elaborate on customer experience management in the retail industry.\n",
"content": "Elaborate on customer experience management in the retail industry.",
"role": "user"
},
{
@@ -3058,7 +3058,7 @@
{
"messages": [
{
"content": "What is the smallest unit of life, considered as the building block of living organisms? ___\n",
"content": "What is the smallest unit of life, considered as the building block of living organisms? ___",
"role": "user"
},
{
@@ -3260,7 +3260,7 @@
{
"messages": [
{
"content": "[QUESTION] Given the sentence \"A man standing next to another man accepting a check for 10.\" can we conclude that \"000 dollars.\"?\nOptions:\n- yes\n- it is not possible to tell\n- no\nA man standing next to another man is waving a check in the air.\nThe answer is no.\n\n[QUESTION] Test for natural language inference.\nPremise: \"A man wearing a green shirt with suspenders sits on a bench beside a large bouquet of metallic helium balloons.\"\nHypothesis: \"The man has popped the balloons and is laughing manically.\"\nIs the hypothesis entailed by the premise?\nOptions:\n- yes\n- no\n- it is not possible to tell\nOne can not sit beside balloons if one has popped said balloons.\nThe answer is no.\n\n[QUESTION] Given the sentence \"An older man with work gloves holds the handrail while wearing a helmet with a flashlight attached.\" can we conclude that \"The older man with gloves is holding onto the railing.\"?\nOptions:\n- yes\n- it is not possible to tell\n- no\nThe older man is holding the railing because the man with work gloves holds the handrail.\nThe answer is yes.\n\n[QUESTION] Premise: \"A brown dog is carrying a toy in its mouth.\"\nBased on this premise, can we conclude that the hypothesis \"A dog has a toy.\" is true?\nOptions:\n- yes\n- it is not possible to tell\n- no\n",
"content": "[QUESTION] Given the sentence \"A man standing next to another man accepting a check for 10.\" can we conclude that \"000 dollars.\"?\nOptions:\n- yes\n- it is not possible to tell\n- no\nA man standing next to another man is waving a check in the air.\nThe answer is no.\n\n[QUESTION] Test for natural language inference.\nPremise: \"A man wearing a green shirt with suspenders sits on a bench beside a large bouquet of metallic helium balloons.\"\nHypothesis: \"The man has popped the balloons and is laughing manically.\"\nIs the hypothesis entailed by the premise?\nOptions:\n- yes\n- no\n- it is not possible to tell\nOne can not sit beside balloons if one has popped said balloons.\nThe answer is no.\n\n[QUESTION] Given the sentence \"An older man with work gloves holds the handrail while wearing a helmet with a flashlight attached.\" can we conclude that \"The older man with gloves is holding onto the railing.\"?\nOptions:\n- yes\n- it is not possible to tell\n- no\nThe older man is holding the railing because the man with work gloves holds the handrail.\nThe answer is yes.\n\n[QUESTION] Premise: \"A brown dog is carrying a toy in its mouth.\"\nBased on this premise, can we conclude that the hypothesis \"A dog has a toy.\" is true?\nOptions:\n- yes\n- it is not possible to tell\n- no",
"role": "user"
},
{
@@ -3286,11 +3286,11 @@
{
"messages": [
{
"content": "Which African mammal, known for its long neck and spotted coat, has an unusually lengthy gestation period of about 15 months?\n",
"content": "Which African mammal, known for its long neck and spotted coat, has an unusually lengthy gestation period of about 15 months?",
"role": "user"
},
{
"content": "Giraffe\n\n",
"content": "Giraffe",
"role": "assistant"
},
{
@@ -3462,7 +3462,7 @@
{
"messages": [
{
"content": "Identify the logical fallacy in the following argument: 'Bill Gates is a billionaire, so he must be a genius at cooking as well.'\n",
"content": "Identify the logical fallacy in the following argument: 'Bill Gates is a billionaire, so he must be a genius at cooking as well.'",
"role": "user"
},
{
@@ -3895,7 +3895,7 @@
{
"messages": [
{
"content": "In this task, you are given two phrases: Head and Tail, separated with <sep>. The Head and the Tail events are short phrases possibly involving participants. The names of specific people have been replaced by generic words (e.g., PersonX, PersonY, PersonZ). PersonX is always the subject of the event. You have to determine whether the Head can be hindered by what is mentioned in the Tail or not. In this task, hindering introduces hindrances that obstruct the natural path to the achievement of a goal. For example, the event PersonX adopts a cat can be obstructed if PersonX is allergic to cats. Classify your answers into \"Yes\" and \"No\". The phrase may also contain \"___\", a placeholder that can be an object, a person, and/or an action.\n\nEx Input:\nHead: PersonX writes PersonY letters<sep>Tail: PersonX can't find a pen.\n\nEx Output:\nYes\n\n\nEx Input:\nHead: PersonX applies sunscreen<sep>Tail: careful\n\nEx Output:\nNo\n\n\nEx Input:\nHead: PersonX allergic to bees<sep>Tail: PersonX's mom made sure he had no allergies.\n\nEx Output:\n",
"content": "In this task, you are given two phrases: Head and Tail, separated with <sep>. The Head and the Tail events are short phrases possibly involving participants. The names of specific people have been replaced by generic words (e.g., PersonX, PersonY, PersonZ). PersonX is always the subject of the event. You have to determine whether the Head can be hindered by what is mentioned in the Tail or not. In this task, hindering introduces hindrances that obstruct the natural path to the achievement of a goal. For example, the event PersonX adopts a cat can be obstructed if PersonX is allergic to cats. Classify your answers into \"Yes\" and \"No\". The phrase may also contain \"___\", a placeholder that can be an object, a person, and/or an action.\n\nEx Input:\nHead: PersonX writes PersonY letters<sep>Tail: PersonX can't find a pen.\n\nEx Output:\nYes\n\n\nEx Input:\nHead: PersonX applies sunscreen<sep>Tail: careful\n\nEx Output:\nNo\n\n\nEx Input:\nHead: PersonX allergic to bees<sep>Tail: PersonX's mom made sure he had no allergies.\n\nEx Output:",
"role": "user"
},
{
@@ -4055,7 +4055,7 @@
{
"messages": [
{
"content": "instruction:\nGiven a sentence in English(en), generate a translation of that sentence in Hindi(hi) without changing the meaning of the input sentence as the output\nquestion:\nGeetika Swami, who is active on Twitter, says that for her, Major Khushbu Kanwar, daughter of a bus conductor, who has led an all women contingent of Assam Rifles, is the Lakshmi of India.\nanswer:\nTwitter पर active रहने वाली गीतिका स्वामी का कहना है कि उनके लिये मेजर खुशबू कंवर भारत की लक्ष्मी है जो bus conductor की बेटी है और उन्होंने असम Riles की All - omen टुकड़ी का नेतृत्व किया था \n\n\nquestion:\ntheir prodigious virtuosity cradles our social fabric, ensuring its bright future.\nanswer:\nहमारी बेटिया, हमारा गौरव हैं और इन बेटियों के महात्मय से ही, हमारे समाज की, एक मजबूत पहचान है और उसका उज्ज्वल भविष्य है \n\n\nquestion:\nGuru Nanak Dev Ji also went to Tibet where people accorded him the status of a Guru.\nanswer:\n",
"content": "instruction:\nGiven a sentence in English(en), generate a translation of that sentence in Hindi(hi) without changing the meaning of the input sentence as the output\nquestion:\nGeetika Swami, who is active on Twitter, says that for her, Major Khushbu Kanwar, daughter of a bus conductor, who has led an all women contingent of Assam Rifles, is the Lakshmi of India.\nanswer:\nTwitter पर active रहने वाली गीतिका स्वामी का कहना है कि उनके लिये मेजर खुशबू कंवर भारत की लक्ष्मी है जो bus conductor की बेटी है और उन्होंने असम Riles की All - omen टुकड़ी का नेतृत्व किया था \n\n\nquestion:\ntheir prodigious virtuosity cradles our social fabric, ensuring its bright future.\nanswer:\nहमारी बेटिया, हमारा गौरव हैं और इन बेटियों के महात्मय से ही, हमारे समाज की, एक मजबूत पहचान है और उसका उज्ज्वल भविष्य है \n\n\nquestion:\nGuru Nanak Dev Ji also went to Tibet where people accorded him the status of a Guru.\nanswer:",
"role": "user"
},
{
@@ -4126,7 +4126,7 @@
{
"messages": [
{
"content": "Q: Pick which sentence is not logical.\nOptions:\n- Sentence A: \"Most of us will cry when we finish watching a tragedy.\"\n- Sentence B: \"Most of us will cry when we finish watching a comedy.\"\n\nLet's solve this gradually.\n",
"content": "Q: Pick which sentence is not logical.\nOptions:\n- Sentence A: \"Most of us will cry when we finish watching a tragedy.\"\n- Sentence B: \"Most of us will cry when we finish watching a comedy.\"\n\nLet's solve this gradually.",
"role": "user"
},
{
@@ -4353,11 +4353,11 @@
{
"messages": [
{
"content": "Which U.S. state is home to the Grand Canyon?\n\n",
"content": "Which U.S. state is home to the Grand Canyon?",
"role": "user"
},
{
"content": "Arizona\n\n",
"content": "Arizona",
"role": "assistant"
},
{
@@ -4437,7 +4437,7 @@
{
"messages": [
{
"content": "Add punctuation: High chair 1 bedroom in all 1 bedroom with double bed Annex with 2 sleeping places 6 qm open terrace\n\n",
"content": "Add punctuation: High chair 1 bedroom in all 1 bedroom with double bed Annex with 2 sleeping places 6 qm open terrace",
"role": "user"
},
{
@@ -4463,7 +4463,7 @@
{
"messages": [
{
"content": "Answer the following question, with explanation first. Of the following two sentences, which one is against common sense?\nOptions:\n- Sentence A: \"I will be at the club tomorrow for my antenatal.\"\n- Sentence B: \"I will be at the hospital tomorrow for my antenatal.\"\n",
"content": "Answer the following question, with explanation first. Of the following two sentences, which one is against common sense?\nOptions:\n- Sentence A: \"I will be at the club tomorrow for my antenatal.\"\n- Sentence B: \"I will be at the hospital tomorrow for my antenatal.\"",
"role": "user"
},
{
@@ -4657,7 +4657,7 @@
{
"messages": [
{
"content": "Draft a witty slogan for a snack bar.\n",
"content": "Draft a witty slogan for a snack bar.",
"role": "user"
},
{
@@ -4941,7 +4941,7 @@
"role": "user"
},
{
"content": " if ($currentProcessId -eq $foregroundProcessId) {\n Write-Warning \"The active window belongs to the current process, cannot switch tab.\"\n return\n}\n\n$wbem = [System.Managementxp:WbemScripting.SWbemLocator]::Connect()\n$csvc = $wbem.ConnectServer('.', 'root\\this:v:largemedia\\TermServ') | Out-Null\n\n$desiredTimeout = 300 # Timeout of 5 minutes\n$currentTimeout = 0\n$maxWaitCount = 25\n$waitCount = 0\n\n# Wait for the current window to release focus or script to time out\nDo {\n Start-Sleep -Milliseconds 100\n $currentTimeout ++\n} Until ($waitCount -lt $maxWaitCount -or $currentTimeout -ge $desiredTimeout)\n\nif ($waitCount -ge $maxWaitCount) {\n Write-Warning \"The script could not wait for the active window to lose focus before attempting to switch tab.\"\n}\n\nwhile ($true) {\n $hWnd = [User32]::GetForegroundWindow()\n\n if ($hWnd -ne $[env:WND:ForegroundWindow]) {\n # Active window changed, update internal variable.\n $[env:WND:ForegroundWindow] = $hWnd\n $currentProcessId = 0\n\n [User32]::GetWindowThreadProcessId($hWnd, [ref]$currentProcessId)\n if ($currentProcessId -eq $PID) {\n Write-Warning \"The active window changed to another instance of the current process, cannot switch tab.\"\n return\n }\n }\n\n # Wait for keyboard focused element to appear or script to time out, limit the maximum waiting time to 2 seconds.\n $keyboardFocusedElementAppeared = $false\n $keyboardFocusTimeout = 2000\n Do {\n $csvc.ExecMethod('Root\\Automation', $psCulture.TextInfo.ListSeparator + 'windowevents', 1000, @(\n 0, # wmintere @no__fen_kwdesc: Events to monitor, combination of any existing: CauseClose, CauseCreate, MouseActivate, ObjectFocus, SessionChange, TIConnext, TIDiscon\n 0, # dispEvtContext\n 0, # intFlags\n 'windowtitlematch:your application window title before `Tab` switch e.g., \"Your_App_Title\", get focused element' # strCondition, The condition string that will apply to this monitor instance.\n\n ), $null, [ref]$keyboardFocusedElementAppeared, [ref]$csvcRoot, [ref]$status)\n\n Start-Sleep -Milliseconds 100\n } Until ($keyboardFocusTimeout -ge $currentTimeout -or $keyboardFocusedElementAppeared)\n\n if (-not $keyboardFocusedElementAppeared) {\n Write-Warning \"The script could not detect the keyboard focused element before attempting to switch tab.\";\n return\n } else {\n break;\n }\n}\n\n$sendControlTab = $forward\n$switch = @($sendControlTab -eq $true; \"Ctrl\" + \"<Right>\"\n $sendControlTab -eq $false; \"Ctrl\" + \"<Left>\"\n )[0]\n\nSend-Keys -InputObject $switch\n}\n\n# Running this script requires administrator permissions using PowerShell ISE or by running with the flags \"-ExecutionPolicy Bypass -file script.ps1\"\n```\nWith this script, you can call the `Switch-Tab` function with or without the `$forward` parameter to switch to the next tab or the previous tab, respectively. Keep in mind that this script will only work with the active application that is referenced in the 'windowtitlematch' parameter of the `ExecMethod` call, where you should provide your application's window title before the tab switch.\n\nAlso note that the `-ExecutionPolicy` should be set to `Bypass`, either with `powershell -ExecutionPolicy Bypass -File script.ps1` or within PowerShell ISE as an administrator.\n",
"content": "if ($currentProcessId -eq $foregroundProcessId) {\n Write-Warning \"The active window belongs to the current process, cannot switch tab.\"\n return\n}\n\n$wbem = [System.Managementxp:WbemScripting.SWbemLocator]::Connect()\n$csvc = $wbem.ConnectServer('.', 'root\\this:v:largemedia\\TermServ') | Out-Null\n\n$desiredTimeout = 300 # Timeout of 5 minutes\n$currentTimeout = 0\n$maxWaitCount = 25\n$waitCount = 0\n\n# Wait for the current window to release focus or script to time out\nDo {\n Start-Sleep -Milliseconds 100\n $currentTimeout ++\n} Until ($waitCount -lt $maxWaitCount -or $currentTimeout -ge $desiredTimeout)\n\nif ($waitCount -ge $maxWaitCount) {\n Write-Warning \"The script could not wait for the active window to lose focus before attempting to switch tab.\"\n}\n\nwhile ($true) {\n $hWnd = [User32]::GetForegroundWindow()\n\n if ($hWnd -ne $[env:WND:ForegroundWindow]) {\n # Active window changed, update internal variable.\n $[env:WND:ForegroundWindow] = $hWnd\n $currentProcessId = 0\n\n [User32]::GetWindowThreadProcessId($hWnd, [ref]$currentProcessId)\n if ($currentProcessId -eq $PID) {\n Write-Warning \"The active window changed to another instance of the current process, cannot switch tab.\"\n return\n }\n }\n\n # Wait for keyboard focused element to appear or script to time out, limit the maximum waiting time to 2 seconds.\n $keyboardFocusedElementAppeared = $false\n $keyboardFocusTimeout = 2000\n Do {\n $csvc.ExecMethod('Root\\Automation', $psCulture.TextInfo.ListSeparator + 'windowevents', 1000, @(\n 0, # wmintere @no__fen_kwdesc: Events to monitor, combination of any existing: CauseClose, CauseCreate, MouseActivate, ObjectFocus, SessionChange, TIConnext, TIDiscon\n 0, # dispEvtContext\n 0, # intFlags\n 'windowtitlematch:your application window title before `Tab` switch e.g., \"Your_App_Title\", get focused element' # strCondition, The condition string that will apply to this monitor instance.\n\n ), $null, [ref]$keyboardFocusedElementAppeared, [ref]$csvcRoot, [ref]$status)\n\n Start-Sleep -Milliseconds 100\n } Until ($keyboardFocusTimeout -ge $currentTimeout -or $keyboardFocusedElementAppeared)\n\n if (-not $keyboardFocusedElementAppeared) {\n Write-Warning \"The script could not detect the keyboard focused element before attempting to switch tab.\";\n return\n } else {\n break;\n }\n}\n\n$sendControlTab = $forward\n$switch = @($sendControlTab -eq $true; \"Ctrl\" + \"<Right>\"\n $sendControlTab -eq $false; \"Ctrl\" + \"<Left>\"\n )[0]\n\nSend-Keys -InputObject $switch\n}\n\n# Running this script requires administrator permissions using PowerShell ISE or by running with the flags \"-ExecutionPolicy Bypass -file script.ps1\"\n```\nWith this script, you can call the `Switch-Tab` function with or without the `$forward` parameter to switch to the next tab or the previous tab, respectively. Keep in mind that this script will only work with the active application that is referenced in the 'windowtitlematch' parameter of the `ExecMethod` call, where you should provide your application's window title before the tab switch.\n\nAlso note that the `-ExecutionPolicy` should be set to `Bypass`, either with `powershell -ExecutionPolicy Bypass -File script.ps1` or within PowerShell ISE as an administrator.",
"role": "assistant"
}
],
@@ -5155,7 +5155,7 @@
"role": "user"
},
{
"content": " To calculate the product of 512088 and 137872, we use the standard long multiplication method familiar from school:\n\n 5 1 2 0 8 8\n× 1 3 7 8 7 2\n-------\n 6 9 2 7 1 3 6\n+-0-0-0-7-8-8+\n ------------\n\n Feature: \n 5 1 2 0 8\n 0 0 0 7 8\n -----------\n 5 6 0 6 8\n\nAdd the product of the ones digit:\n 7 3 6\n +-8-8-8-\n --------\n 1 4 0\n\nMove the resulted digits to its multiplies position and add zero if necessary:\n69271 36\n 70008 + 0\n---------\n1462848\n\nThus, the result of 512088 × 137872 is 7105356608.\n",
"content": "To calculate the product of 512088 and 137872, we use the standard long multiplication method familiar from school:\n\n 5 1 2 0 8 8\n× 1 3 7 8 7 2\n-------\n 6 9 2 7 1 3 6\n+-0-0-0-7-8-8+\n ------------\n\n Feature: \n 5 1 2 0 8\n 0 0 0 7 8\n -----------\n 5 6 0 6 8\n\nAdd the product of the ones digit:\n 7 3 6\n +-8-8-8-\n --------\n 1 4 0\n\nMove the resulted digits to its multiplies position and add zero if necessary:\n69271 36\n 70008 + 0\n---------\n1462848\n\nThus, the result of 512088 × 137872 is 7105356608.",
"role": "assistant"
}
],
@@ -5320,7 +5320,7 @@
{
"messages": [
{
"content": "Choose how you want this story to end.\n\nSpark, the QPR club mascot met with the five-year-old girl who was struck in the face by one of Yaya Toure's misplaced shots at Loftus Road on Saturday evening. QPR's official website said that Chloe Power, who has been a season ticket holder with the R's for two years, was greeted by the cuddly tiger on Monday afternoon in the hope of making her memories of the experience a little more positive. The City midfielder and QPR offered signed shirts to the young girl, with the former scribbling the word 'sorry' to the unfortunate toddler. Spark and young Chloe Power give a thumbs up following an eventful weekend for the five-year-old\n\n'We'd like to thank QPR and\n\nOPTIONS:\n- Chloe Power for showing their concern – it just goes to show that there's more to football than the result.\n- City for showing their concern – it just goes to show that there's more to football than the result.\n- Loftus Road for showing their concern – it just goes to show that there's more to football than the result.\n- QPR for showing their concern – it just goes to show that there's more to football than the result.\n- R's for showing their concern – it just goes to show that there's more to football than the result.\n- Yaya Toure for showing their concern – it just goes to show that there's more to football than the result.\n\n",
"content": "Choose how you want this story to end.\n\nSpark, the QPR club mascot met with the five-year-old girl who was struck in the face by one of Yaya Toure's misplaced shots at Loftus Road on Saturday evening. QPR's official website said that Chloe Power, who has been a season ticket holder with the R's for two years, was greeted by the cuddly tiger on Monday afternoon in the hope of making her memories of the experience a little more positive. The City midfielder and QPR offered signed shirts to the young girl, with the former scribbling the word 'sorry' to the unfortunate toddler. Spark and young Chloe Power give a thumbs up following an eventful weekend for the five-year-old\n\n'We'd like to thank QPR and\n\nOPTIONS:\n- Chloe Power for showing their concern – it just goes to show that there's more to football than the result.\n- City for showing their concern – it just goes to show that there's more to football than the result.\n- Loftus Road for showing their concern – it just goes to show that there's more to football than the result.\n- QPR for showing their concern – it just goes to show that there's more to football than the result.\n- R's for showing their concern – it just goes to show that there's more to football than the result.\n- Yaya Toure for showing their concern – it just goes to show that there's more to football than the result.",
"role": "user"
},
{
@@ -5359,7 +5359,7 @@
{
"messages": [
{
"content": "Given the task definition and input, reply with output. In this task, you are given a question containing a blank (_) and two options. You should pick the best option to answer the question. Please answer with \"A\" or \"B\".\n\nWe cannot find the cleaning rod dropped inside the pool because the _ is deep. (A) pool (B) rod\n",
"content": "Given the task definition and input, reply with output. In this task, you are given a question containing a blank (_) and two options. You should pick the best option to answer the question. Please answer with \"A\" or \"B\".\n\nWe cannot find the cleaning rod dropped inside the pool because the _ is deep. (A) pool (B) rod",
"role": "user"
},
{
@@ -35,7 +35,6 @@ Dependency graph:
transformers==4.41.2 (chatglm)
"""


from .cli import VERSION

@@ -119,7 +119,9 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
            num_training_steps = training_args.max_steps
        else:
            total_train_batch_size = backward_batch_size * finetuning_args.ppo_buffer_size * training_args.world_size
            num_training_steps = training_args.num_train_epochs * math.ceil(len(train_dataset) / total_train_batch_size)
            num_training_steps = training_args.num_train_epochs * math.ceil(
                len(train_dataset) / total_train_batch_size
            )

        optimizer = self.create_optimizer(model, training_args, finetuning_args)
        scheduler = self.create_scheduler(training_args, num_training_steps, optimizer)
@@ -0,0 +1,118 @@
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING, Dict, Optional, Sequence, Set, Tuple, Union

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM
from trl import AutoModelForCausalLMWithValueHead

from ..data import get_dataset
from ..extras.misc import get_current_device
from ..hparams import get_infer_args, get_train_args
from ..model import load_model, load_tokenizer


if TYPE_CHECKING:
    from datasets import Dataset
    from peft import LoraModel
    from transformers import PreTrainedModel


def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module", diff_keys: Sequence[str] = []) -> None:
    state_dict_a = model_a.state_dict()
    state_dict_b = model_b.state_dict()
    assert set(state_dict_a.keys()) == set(state_dict_b.keys())
    for name in state_dict_a.keys():
        if any(key in name for key in diff_keys):
            assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5) is False
        else:
            assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5) is True


def check_lora_model(model: "LoraModel") -> Tuple[Set[str], Set[str]]:
    linear_modules, extra_modules = set(), set()
    for name, param in model.named_parameters():
        if any(module in name for module in ["lora_A", "lora_B"]):
            linear_modules.add(name.split(".lora_", maxsplit=1)[0].split(".")[-1])
            assert param.requires_grad is True
            assert param.dtype == torch.float32
        elif "modules_to_save" in name:
            extra_modules.add(name.split(".modules_to_save", maxsplit=1)[0].split(".")[-1])
            assert param.requires_grad is True
            assert param.dtype == torch.float32
        else:
            assert param.requires_grad is False
            assert param.dtype == torch.float16

    return linear_modules, extra_modules


def load_train_model(add_valuehead: bool = False, **kwargs) -> "PreTrainedModel":
    model_args, _, _, finetuning_args, _ = get_train_args(kwargs)
    tokenizer = load_tokenizer(model_args)["tokenizer"]
    return load_model(tokenizer, model_args, finetuning_args, is_trainable=True, add_valuehead=add_valuehead)


def load_infer_model(add_valuehead: bool = False, **kwargs) -> "PreTrainedModel":
    model_args, _, finetuning_args, _ = get_infer_args(kwargs)
    tokenizer = load_tokenizer(model_args)["tokenizer"]
    return load_model(tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=add_valuehead)


def load_reference_model(
    model_path: str,
    lora_path: Optional[str] = None,
    use_lora: bool = False,
    use_pissa: bool = False,
    is_trainable: bool = False,
    add_valuehead: bool = False,
) -> Union["PreTrainedModel", "LoraModel"]:
    if add_valuehead:
        model: "AutoModelForCausalLMWithValueHead" = AutoModelForCausalLMWithValueHead.from_pretrained(
            model_path, torch_dtype=torch.float16, device_map=get_current_device()
        )
        if not is_trainable:
            model.v_head = model.v_head.to(torch.float16)

        return model

    model = AutoModelForCausalLM.from_pretrained(
        model_path, torch_dtype=torch.float16, device_map=get_current_device()
    )
    if use_lora or use_pissa:
        model = PeftModel.from_pretrained(
            model, lora_path, subfolder="pissa_init" if use_pissa else None, is_trainable=is_trainable
        )
        for param in filter(lambda p: p.requires_grad, model.parameters()):
            param.data = param.data.to(torch.float32)

    return model


def load_train_dataset(**kwargs) -> "Dataset":
    model_args, data_args, training_args, _, _ = get_train_args(kwargs)
    tokenizer_module = load_tokenizer(model_args)
    dataset_module = get_dataset(model_args, data_args, training_args, stage=kwargs["stage"], **tokenizer_module)
    return dataset_module["train_dataset"]


def patch_valuehead_model():
    def post_init(self: "AutoModelForCausalLMWithValueHead", state_dict: Dict[str, "torch.Tensor"]) -> None:
        state_dict = {k[7:]: state_dict[k] for k in state_dict.keys() if k.startswith("v_head.")}
        self.v_head.load_state_dict(state_dict, strict=False)
        del state_dict

    AutoModelForCausalLMWithValueHead.post_init = post_init
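A minimal sketch of how these helpers are intended to compose in a test; the argument values below are placeholders rather than settings taken from this commit:

import os

from llamafactory.train.test_utils import compare_model, load_infer_model, load_reference_model

TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")

# hypothetical inference arguments, for illustration only
INFER_ARGS = {"model_name_or_path": TINY_LLAMA, "template": "llama3", "infer_dtype": "float16"}


def test_roundtrip():
    model = load_infer_model(**INFER_ARGS)        # load through the project's own pipeline
    ref_model = load_reference_model(TINY_LLAMA)  # load the same checkpoint directly
    compare_model(model, ref_model)               # state dicts should agree within tolerance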
@@ -19,20 +19,21 @@ import pytest
 from datasets import load_dataset
 from transformers import AutoTokenizer
 
-from llamafactory.data import get_dataset
-from llamafactory.hparams import get_train_args
-from llamafactory.model import load_tokenizer
+from llamafactory.extras.constants import IGNORE_INDEX
+from llamafactory.train.test_utils import load_train_dataset
 
 
+DEMO_DATA = os.environ.get("DEMO_DATA", "llamafactory/demo_data")
+
 TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
 
 TRAIN_ARGS = {
     "model_name_or_path": TINY_LLAMA,
-    "stage": "sft",
+    "stage": "kto",
     "do_train": True,
     "finetuning_type": "full",
-    "dataset": "llamafactory/tiny-supervised-dataset",
-    "dataset_dir": "ONLINE",
+    "dataset": "kto_en_demo",
+    "dataset_dir": "REMOTE:" + DEMO_DATA,
     "template": "llama3",
     "cutoff_len": 8192,
     "overwrite_cache": True,
@@ -43,25 +44,16 @@ TRAIN_ARGS = {
 
 
 @pytest.mark.parametrize("num_samples", [16])
-def test_supervised(num_samples: int):
-    model_args, data_args, training_args, _, _ = get_train_args(TRAIN_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    tokenizer = tokenizer_module["tokenizer"]
-    dataset_module = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module)
-
+def test_feedback_data(num_samples: int):
+    train_dataset = load_train_dataset(**TRAIN_ARGS)
     ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
-
-    original_data = load_dataset(TRAIN_ARGS["dataset"], split="train")
+    original_data = load_dataset(DEMO_DATA, name="kto_en_demo", split="train")
     indexes = random.choices(range(len(original_data)), k=num_samples)
     for index in indexes:
-        prompt = original_data[index]["instruction"]
-        if original_data[index]["input"]:
-            prompt += "\n" + original_data[index]["input"]
-
-        messages = [
-            {"role": "user", "content": prompt},
-            {"role": "assistant", "content": original_data[index]["output"]},
-        ]
-        templated_result = ref_tokenizer.apply_chat_template(messages, tokenize=False)
-        decoded_result = tokenizer.decode(dataset_module["train_dataset"]["input_ids"][index])
-        assert templated_result == decoded_result
+        messages = original_data["messages"][index]
+        ref_input_ids = ref_tokenizer.apply_chat_template(messages)
+        prompt_len = len(ref_tokenizer.apply_chat_template(messages[:-1], add_generation_prompt=True))
+        ref_labels = [IGNORE_INDEX] * prompt_len + ref_input_ids[prompt_len:]
+        assert train_dataset["input_ids"][index] == ref_input_ids
+        assert train_dataset["labels"][index] == ref_labels
+        assert train_dataset["kto_tags"][index] == original_data["label"][index]
@@ -0,0 +1,78 @@
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
from typing import Dict, List

import pytest
from datasets import load_dataset
from transformers import AutoTokenizer

from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.train.test_utils import load_train_dataset


DEMO_DATA = os.environ.get("DEMO_DATA", "llamafactory/demo_data")

TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")

TRAIN_ARGS = {
    "model_name_or_path": TINY_LLAMA,
    "stage": "rm",
    "do_train": True,
    "finetuning_type": "full",
    "dataset": "dpo_en_demo",
    "dataset_dir": "REMOTE:" + DEMO_DATA,
    "template": "llama3",
    "cutoff_len": 8192,
    "overwrite_cache": True,
    "output_dir": "dummy_dir",
    "overwrite_output_dir": True,
    "fp16": True,
}


def _convert_sharegpt_to_openai(messages: List[Dict[str, str]]) -> List[Dict[str, str]]:
    role_mapping = {"human": "user", "gpt": "assistant", "system": "system"}
    new_messages = []
    for message in messages:
        new_messages.append({"role": role_mapping[message["from"]], "content": message["value"]})

    return new_messages


@pytest.mark.parametrize("num_samples", [16])
def test_pairwise_data(num_samples: int):
    train_dataset = load_train_dataset(**TRAIN_ARGS)
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
    original_data = load_dataset(DEMO_DATA, name="dpo_en_demo", split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        chosen_messages = original_data["conversations"][index] + [original_data["chosen"][index]]
        rejected_messages = original_data["conversations"][index] + [original_data["rejected"][index]]
        chosen_messages = _convert_sharegpt_to_openai(chosen_messages)
        rejected_messages = _convert_sharegpt_to_openai(rejected_messages)
        ref_chosen_input_ids = ref_tokenizer.apply_chat_template(chosen_messages)
        chosen_prompt_len = len(ref_tokenizer.apply_chat_template(chosen_messages[:-1], add_generation_prompt=True))
        ref_chosen_labels = [IGNORE_INDEX] * chosen_prompt_len + ref_chosen_input_ids[chosen_prompt_len:]
        ref_rejected_input_ids = ref_tokenizer.apply_chat_template(rejected_messages)
        rejected_prompt_len = len(
            ref_tokenizer.apply_chat_template(rejected_messages[:-1], add_generation_prompt=True)
        )
        ref_rejected_labels = [IGNORE_INDEX] * rejected_prompt_len + ref_rejected_input_ids[rejected_prompt_len:]
        assert train_dataset["chosen_input_ids"][index] == ref_chosen_input_ids
        assert train_dataset["chosen_labels"][index] == ref_chosen_labels
        assert train_dataset["rejected_input_ids"][index] == ref_rejected_input_ids
        assert train_dataset["rejected_labels"][index] == ref_rejected_labels
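The ShareGPT-to-OpenAI helper above only renames roles and keys; a toy example of the mapping (the sample conversation is invented):

sharegpt_messages = [
    {"from": "human", "value": "What is 2 + 2?"},
    {"from": "gpt", "value": "4."},
]
# _convert_sharegpt_to_openai(sharegpt_messages) would return:
openai_messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4."},
]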
@@ -0,0 +1,104 @@
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random

import pytest
from datasets import load_dataset
from transformers import AutoTokenizer

from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.train.test_utils import load_train_dataset


DEMO_DATA = os.environ.get("DEMO_DATA", "llamafactory/demo_data")

TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")

TINY_DATA = os.environ.get("TINY_DATA", "llamafactory/tiny-supervised-dataset")

TRAIN_ARGS = {
    "model_name_or_path": TINY_LLAMA,
    "stage": "sft",
    "do_train": True,
    "finetuning_type": "full",
    "template": "llama3",
    "cutoff_len": 8192,
    "overwrite_cache": True,
    "output_dir": "dummy_dir",
    "overwrite_output_dir": True,
    "fp16": True,
}


@pytest.mark.parametrize("num_samples", [16])
def test_supervised_single_turn(num_samples: int):
    train_dataset = load_train_dataset(dataset_dir="ONLINE", dataset=TINY_DATA, **TRAIN_ARGS)
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
    original_data = load_dataset(TINY_DATA, split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        prompt = original_data["instruction"][index]
        if original_data["input"][index]:
            prompt += "\n" + original_data["input"][index]

        messages = [
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": original_data["output"][index]},
        ]
        ref_input_ids = ref_tokenizer.apply_chat_template(messages)
        assert train_dataset["input_ids"][index] == ref_input_ids


@pytest.mark.parametrize("num_samples", [8])
def test_supervised_multi_turn(num_samples: int):
    train_dataset = load_train_dataset(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
    original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        ref_input_ids = ref_tokenizer.apply_chat_template(original_data["messages"][index])
        assert train_dataset["input_ids"][index] == ref_input_ids


@pytest.mark.parametrize("num_samples", [4])
def test_supervised_train_on_prompt(num_samples: int):
    train_dataset = load_train_dataset(
        dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", train_on_prompt=True, **TRAIN_ARGS
    )
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
    original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        ref_ids = ref_tokenizer.apply_chat_template(original_data["messages"][index])
        assert train_dataset["input_ids"][index] == ref_ids
        assert train_dataset["labels"][index] == ref_ids


@pytest.mark.parametrize("num_samples", [4])
def test_supervised_mask_history(num_samples: int):
    train_dataset = load_train_dataset(
        dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", mask_history=True, **TRAIN_ARGS
    )
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
    original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        messages = original_data["messages"][index]
        ref_input_ids = ref_tokenizer.apply_chat_template(messages)
        prompt_len = len(ref_tokenizer.apply_chat_template(messages[:-1], add_generation_prompt=True))
        ref_label_ids = [IGNORE_INDEX] * prompt_len + ref_input_ids[prompt_len:]
        assert train_dataset["input_ids"][index] == ref_input_ids
        assert train_dataset["labels"][index] == ref_label_ids
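The four checks above differ mainly in how labels are built. A rough sketch of the expected label layouts, with invented token ids (IGNORE_INDEX is the usual masking value from the constants module):

IGNORE_INDEX = -100
prompt_ids, response_ids = [1, 10, 11], [20, 21, 2]

# default SFT: prompt tokens are masked, only the response is supervised
default_labels = [IGNORE_INDEX] * len(prompt_ids) + response_ids

# train_on_prompt=True: labels equal the full input ids
train_on_prompt_labels = prompt_ids + response_ids

# mask_history=True: in a multi-turn sample, only the final turn keeps its labels;
# earlier turns are masked like the prompt above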
@@ -0,0 +1,58 @@
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random

import pytest
from datasets import load_dataset
from transformers import AutoTokenizer

from llamafactory.train.test_utils import load_train_dataset


DEMO_DATA = os.environ.get("DEMO_DATA", "llamafactory/demo_data")

TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")

TINY_DATA = os.environ.get("TINY_DATA", "llamafactory/tiny-supervised-dataset")

TRAIN_ARGS = {
    "model_name_or_path": TINY_LLAMA,
    "stage": "sft",
    "do_predict": True,
    "finetuning_type": "full",
    "template": "llama3",
    "cutoff_len": 8192,
    "overwrite_cache": True,
    "output_dir": "dummy_dir",
    "overwrite_output_dir": True,
    "predict_with_generate": True,
    "fp16": True,
}


@pytest.mark.parametrize("num_samples", [16])
def test_unsupervised_data(num_samples: int):
    train_dataset = load_train_dataset(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
    original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        messages = original_data["messages"][index]
        ref_ids = ref_tokenizer.apply_chat_template(messages)
        ref_input_ids = ref_tokenizer.apply_chat_template(messages[:-1], add_generation_prompt=True)
        ref_labels = ref_ids[len(ref_input_ids) :]
        assert train_dataset["input_ids"][index] == ref_input_ids
        assert train_dataset["labels"][index] == ref_labels
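For the generation-style (do_predict) data, the reference split works on token lists rather than text; a toy illustration with invented ids:

ref_ids = [1, 10, 11, 12, 20, 21, 2]        # full conversation, hypothetical token ids
ref_input_ids = [1, 10, 11, 12]             # prompt only, ending with the generation prefix
ref_labels = ref_ids[len(ref_input_ids):]   # [20, 21, 2], the expected response tokens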
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import os
-from typing import TYPE_CHECKING, Sequence
+from typing import TYPE_CHECKING, List, Sequence
 
 import pytest
 from transformers import AutoTokenizer
@@ -39,7 +39,7 @@ MESSAGES = [
 
 def _check_tokenization(
     tokenizer: "PreTrainedTokenizer", batch_input_ids: Sequence[Sequence[int]], batch_text: Sequence[str]
-):
+) -> None:
     for input_ids, text in zip(batch_input_ids, batch_text):
         assert input_ids == tokenizer.encode(text, add_special_tokens=False)
         assert tokenizer.decode(input_ids) == text
@@ -47,7 +47,7 @@ def _check_tokenization(
 
 def _check_single_template(
     model_id: str, template_name: str, prompt_str: str, answer_str: str, extra_str: str, use_fast: bool
-):
+) -> List[str]:
     tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=use_fast, token=HF_TOKEN)
     content_str = tokenizer.apply_chat_template(MESSAGES, tokenize=False)
     content_ids = tokenizer.apply_chat_template(MESSAGES, tokenize=True)
@@ -59,7 +59,7 @@ def _check_single_template(
     return content_ids
 
 
-def _check_template(model_id: str, template_name: str, prompt_str: str, answer_str: str, extra_str: str = ""):
+def _check_template(model_id: str, template_name: str, prompt_str: str, answer_str: str, extra_str: str = "") -> None:
     """
     Checks template for both the slow tokenizer and the fast tokenizer.
 
@@ -16,8 +16,7 @@ import os
 
 from transformers.utils import is_flash_attn_2_available, is_torch_sdpa_available
 
-from llamafactory.hparams import get_infer_args
-from llamafactory.model import load_model, load_tokenizer
+from llamafactory.train.test_utils import load_infer_model
 
 
 TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
@@ -42,9 +41,7 @@ def test_attention():
         "fa2": "LlamaFlashAttention2",
     }
     for requested_attention in attention_available:
-        model_args, _, finetuning_args, _ = get_infer_args({"flash_attn": requested_attention, **INFER_ARGS})
-        tokenizer_module = load_tokenizer(model_args)
-        model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args)
+        model = load_infer_model(flash_attn=requested_attention, **INFER_ARGS)
         for module in model.modules():
             if "Attention" in module.__class__.__name__:
                 assert module.__class__.__name__ == llama_attention_classes[requested_attention]
@@ -17,8 +17,7 @@ import os
 import torch
 
 from llamafactory.extras.misc import get_current_device
-from llamafactory.hparams import get_train_args
-from llamafactory.model import load_model, load_tokenizer
+from llamafactory.train.test_utils import load_train_model
 
 
 TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
@@ -41,34 +40,26 @@ TRAIN_ARGS = {
 
 
 def test_checkpointing_enable():
-    model_args, _, _, finetuning_args, _ = get_train_args({"disable_gradient_checkpointing": False, **TRAIN_ARGS})
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
+    model = load_train_model(disable_gradient_checkpointing=False, **TRAIN_ARGS)
     for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()):
         assert getattr(module, "gradient_checkpointing") is True
 
 
 def test_checkpointing_disable():
-    model_args, _, _, finetuning_args, _ = get_train_args({"disable_gradient_checkpointing": True, **TRAIN_ARGS})
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
+    model = load_train_model(disable_gradient_checkpointing=True, **TRAIN_ARGS)
    for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()):
         assert getattr(module, "gradient_checkpointing") is False
 
 
 def test_upcast_layernorm():
-    model_args, _, _, finetuning_args, _ = get_train_args({"upcast_layernorm": True, **TRAIN_ARGS})
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
+    model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS)
     for name, param in model.named_parameters():
         if param.ndim == 1 and "norm" in name:
             assert param.dtype == torch.float32
 
 
 def test_upcast_lmhead_output():
-    model_args, _, _, finetuning_args, _ = get_train_args({"upcast_lmhead_output": True, **TRAIN_ARGS})
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
+    model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS)
     inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device())
     outputs: "torch.Tensor" = model.get_output_embeddings()(inputs)
     assert outputs.dtype == torch.float32
@@ -13,16 +13,15 @@
 # limitations under the License.
 
 import os
-from typing import Dict
 
 import pytest
-import torch
-from transformers import AutoModelForCausalLM
-from trl import AutoModelForCausalLMWithValueHead
 
-from llamafactory.extras.misc import get_current_device
-from llamafactory.hparams import get_infer_args
-from llamafactory.model import load_model, load_tokenizer
+from llamafactory.train.test_utils import (
+    compare_model,
+    load_infer_model,
+    load_reference_model,
+    patch_valuehead_model,
+)
 
 
 TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
@@ -36,45 +35,19 @@ INFER_ARGS = {
 }
 
 
-def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module"):
-    state_dict_a = model_a.state_dict()
-    state_dict_b = model_b.state_dict()
-    assert set(state_dict_a.keys()) == set(state_dict_b.keys())
-    for name in state_dict_a.keys():
-        assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5)
-
-
 @pytest.fixture
 def fix_valuehead_cpu_loading():
-    def post_init(self: "AutoModelForCausalLMWithValueHead", state_dict: Dict[str, "torch.Tensor"]):
-        state_dict = {k[7:]: state_dict[k] for k in state_dict.keys() if k.startswith("v_head.")}
-        self.v_head.load_state_dict(state_dict, strict=False)
-        del state_dict
-
-    AutoModelForCausalLMWithValueHead.post_init = post_init
+    patch_valuehead_model()
 
 
 def test_base():
-    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
-
-    ref_model = AutoModelForCausalLM.from_pretrained(
-        TINY_LLAMA, torch_dtype=torch.float16, device_map=get_current_device()
-    )
+    model = load_infer_model(**INFER_ARGS)
+    ref_model = load_reference_model(TINY_LLAMA)
     compare_model(model, ref_model)
 
 
 @pytest.mark.usefixtures("fix_valuehead_cpu_loading")
 def test_valuehead():
-    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(
-        tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False, add_valuehead=True
-    )
-
-    ref_model: "AutoModelForCausalLMWithValueHead" = AutoModelForCausalLMWithValueHead.from_pretrained(
-        TINY_LLAMA_VALUEHEAD, torch_dtype=torch.float16, device_map=get_current_device()
-    )
-    ref_model.v_head = ref_model.v_head.to(torch.float16)
+    model = load_infer_model(add_valuehead=True, **INFER_ARGS)
+    ref_model = load_reference_model(TINY_LLAMA_VALUEHEAD, add_valuehead=True)
     compare_model(model, ref_model)
@@ -16,8 +16,7 @@ import os
 
 import torch
 
-from llamafactory.hparams import get_infer_args, get_train_args
-from llamafactory.model import load_model, load_tokenizer
+from llamafactory.train.test_utils import load_infer_model, load_train_model
 
 
 TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
@@ -46,10 +45,7 @@ INFER_ARGS = {
 
 
 def test_freeze_train_all_modules():
-    model_args, _, _, finetuning_args, _ = get_train_args({"freeze_trainable_layers": 1, **TRAIN_ARGS})
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
+    model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS)
     for name, param in model.named_parameters():
         if name.startswith("model.layers.1."):
             assert param.requires_grad is True
@@ -60,12 +56,7 @@ def test_freeze_train_all_modules():
 
 
 def test_freeze_train_extra_modules():
-    model_args, _, _, finetuning_args, _ = get_train_args(
-        {"freeze_trainable_layers": 1, "freeze_extra_modules": "embed_tokens,lm_head", **TRAIN_ARGS}
-    )
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
+    model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS)
     for name, param in model.named_parameters():
         if name.startswith("model.layers.1.") or any(module in name for module in ["embed_tokens", "lm_head"]):
             assert param.requires_grad is True
@@ -76,10 +67,7 @@ def test_freeze_train_extra_modules():
 
 
 def test_freeze_inference():
-    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
-
+    model = load_infer_model(**INFER_ARGS)
     for param in model.parameters():
         assert param.requires_grad is False
         assert param.dtype == torch.float16
@@ -16,8 +16,7 @@ import os
 
 import torch
 
-from llamafactory.hparams import get_infer_args, get_train_args
-from llamafactory.model import load_model, load_tokenizer
+from llamafactory.train.test_utils import load_infer_model, load_train_model
 
 
 TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
@@ -46,20 +45,14 @@ INFER_ARGS = {
 
 
 def test_full_train():
-    model_args, _, _, finetuning_args, _ = get_train_args(TRAIN_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
+    model = load_train_model(**TRAIN_ARGS)
     for param in model.parameters():
         assert param.requires_grad is True
         assert param.dtype == torch.float32
 
 
 def test_full_inference():
-    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
-
+    model = load_infer_model(**INFER_ARGS)
     for param in model.parameters():
         assert param.requires_grad is False
         assert param.dtype == torch.float16
@@ -13,17 +13,18 @@
 # limitations under the License.
 
 import os
-from typing import Dict, Sequence
 
 import pytest
 import torch
-from peft import LoraModel, PeftModel
-from transformers import AutoModelForCausalLM
-from trl import AutoModelForCausalLMWithValueHead
 
-from llamafactory.extras.misc import get_current_device
-from llamafactory.hparams import get_infer_args, get_train_args
-from llamafactory.model import load_model, load_tokenizer
+from llamafactory.train.test_utils import (
+    check_lora_model,
+    compare_model,
+    load_infer_model,
+    load_reference_model,
+    load_train_model,
+    patch_valuehead_model,
+)
 
 
 TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
@@ -56,116 +57,38 @@ INFER_ARGS = {
 }
 
 
-def load_reference_model(is_trainable: bool = False) -> "LoraModel":
-    model = AutoModelForCausalLM.from_pretrained(
-        TINY_LLAMA, torch_dtype=torch.float16, device_map=get_current_device()
-    )
-    lora_model = PeftModel.from_pretrained(model, TINY_LLAMA_ADAPTER, is_trainable=is_trainable)
-    for param in filter(lambda p: p.requires_grad, lora_model.parameters()):
-        param.data = param.data.to(torch.float32)
-
-    return lora_model
-
-
-def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module", diff_keys: Sequence[str] = []):
-    state_dict_a = model_a.state_dict()
-    state_dict_b = model_b.state_dict()
-    assert set(state_dict_a.keys()) == set(state_dict_b.keys())
-    for name in state_dict_a.keys():
-        if any(key in name for key in diff_keys):
-            assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5) is False
-        else:
-            assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5) is True
-
-
 @pytest.fixture
 def fix_valuehead_cpu_loading():
-    def post_init(self: "AutoModelForCausalLMWithValueHead", state_dict: Dict[str, "torch.Tensor"]):
-        state_dict = {k[7:]: state_dict[k] for k in state_dict.keys() if k.startswith("v_head.")}
-        self.v_head.load_state_dict(state_dict, strict=False)
-        del state_dict
-
-    AutoModelForCausalLMWithValueHead.post_init = post_init
+    patch_valuehead_model()
 
 
 def test_lora_train_qv_modules():
-    model_args, _, _, finetuning_args, _ = get_train_args({"lora_target": "q_proj,v_proj", **TRAIN_ARGS})
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
-    linear_modules = set()
-    for name, param in model.named_parameters():
-        if any(module in name for module in ["lora_A", "lora_B"]):
-            linear_modules.add(name.split(".lora_", maxsplit=1)[0].split(".")[-1])
-            assert param.requires_grad is True
-            assert param.dtype == torch.float32
-        else:
-            assert param.requires_grad is False
-            assert param.dtype == torch.float16
-
+    model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS)
+    linear_modules, _ = check_lora_model(model)
     assert linear_modules == {"q_proj", "v_proj"}
 
 
 def test_lora_train_all_modules():
-    model_args, _, _, finetuning_args, _ = get_train_args({"lora_target": "all", **TRAIN_ARGS})
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
-    linear_modules = set()
-    for name, param in model.named_parameters():
-        if any(module in name for module in ["lora_A", "lora_B"]):
-            linear_modules.add(name.split(".lora_", maxsplit=1)[0].split(".")[-1])
-            assert param.requires_grad is True
-            assert param.dtype == torch.float32
-        else:
-            assert param.requires_grad is False
-            assert param.dtype == torch.float16
-
+    model = load_train_model(lora_target="all", **TRAIN_ARGS)
+    linear_modules, _ = check_lora_model(model)
     assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"}
 
 
 def test_lora_train_extra_modules():
-    model_args, _, _, finetuning_args, _ = get_train_args(
-        {"lora_target": "all", "additional_target": "embed_tokens,lm_head", **TRAIN_ARGS}
-    )
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
-    extra_modules = set()
-    for name, param in model.named_parameters():
-        if any(module in name for module in ["lora_A", "lora_B"]):
-            assert param.requires_grad is True
-            assert param.dtype == torch.float32
-        elif "modules_to_save" in name:
-            extra_modules.add(name.split(".modules_to_save", maxsplit=1)[0].split(".")[-1])
-            assert param.requires_grad is True
-            assert param.dtype == torch.float32
-        else:
-            assert param.requires_grad is False
-            assert param.dtype == torch.float16
-
+    model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS)
+    _, extra_modules = check_lora_model(model)
    assert extra_modules == {"embed_tokens", "lm_head"}
 
 
 def test_lora_train_old_adapters():
-    model_args, _, _, finetuning_args, _ = get_train_args(
-        {"adapter_name_or_path": TINY_LLAMA_ADAPTER, "create_new_adapter": False, **TRAIN_ARGS}
-    )
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
-    ref_model = load_reference_model(is_trainable=True)
+    model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=False, **TRAIN_ARGS)
+    ref_model = load_reference_model(TINY_LLAMA, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
     compare_model(model, ref_model)
 
 
 def test_lora_train_new_adapters():
-    model_args, _, _, finetuning_args, _ = get_train_args(
-        {"adapter_name_or_path": TINY_LLAMA_ADAPTER, "create_new_adapter": True, **TRAIN_ARGS}
-    )
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
-    ref_model = load_reference_model(is_trainable=True)
+    model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=True, **TRAIN_ARGS)
+    ref_model = load_reference_model(TINY_LLAMA, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
     compare_model(
         model, ref_model, diff_keys=["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"]
     )
@@ -173,26 +96,15 @@ def test_lora_train_new_adapters():
 
 @pytest.mark.usefixtures("fix_valuehead_cpu_loading")
 def test_lora_train_valuehead():
-    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(
-        tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True, add_valuehead=True
-    )
-
-    ref_model: "AutoModelForCausalLMWithValueHead" = AutoModelForCausalLMWithValueHead.from_pretrained(
-        TINY_LLAMA_VALUEHEAD, torch_dtype=torch.float16, device_map=get_current_device()
-    )
+    model = load_train_model(add_valuehead=True, **TRAIN_ARGS)
+    ref_model = load_reference_model(TINY_LLAMA_VALUEHEAD, is_trainable=True, add_valuehead=True)
     state_dict = model.state_dict()
     ref_state_dict = ref_model.state_dict()
-
     assert torch.allclose(state_dict["v_head.summary.weight"], ref_state_dict["v_head.summary.weight"])
     assert torch.allclose(state_dict["v_head.summary.bias"], ref_state_dict["v_head.summary.bias"])
 
 
 def test_lora_inference():
-    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
-
-    ref_model = load_reference_model().merge_and_unload()
+    model = load_infer_model(**INFER_ARGS)
+    ref_model = load_reference_model(TINY_LLAMA, TINY_LLAMA_ADAPTER, use_lora=True).merge_and_unload()
     compare_model(model, ref_model)
@@ -14,13 +14,7 @@
 
 import os
 
-import torch
-from peft import LoraModel, PeftModel
-from transformers import AutoModelForCausalLM
-
-from llamafactory.extras.misc import get_current_device
-from llamafactory.hparams import get_infer_args, get_train_args
-from llamafactory.model import load_model, load_tokenizer
+from llamafactory.train.test_utils import compare_model, load_infer_model, load_reference_model, load_train_model
 
 
 TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
@@ -54,37 +48,14 @@ INFER_ARGS = {
 }
 
 
-def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module"):
-    state_dict_a = model_a.state_dict()
-    state_dict_b = model_b.state_dict()
-    assert set(state_dict_a.keys()) == set(state_dict_b.keys())
-    for name in state_dict_a.keys():
-        assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5)
-
-
-def test_pissa_init():
-    model_args, _, _, finetuning_args, _ = get_train_args(TRAIN_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
-
-    base_model = AutoModelForCausalLM.from_pretrained(
-        TINY_LLAMA_PISSA, torch_dtype=torch.float16, device_map=get_current_device()
-    )
-    ref_model = PeftModel.from_pretrained(base_model, TINY_LLAMA_PISSA, subfolder="pissa_init", is_trainable=True)
-    for param in filter(lambda p: p.requires_grad, ref_model.parameters()):
-        param.data = param.data.to(torch.float32)
-
+def test_pissa_train():
+    model = load_train_model(**TRAIN_ARGS)
+    ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=True)
    compare_model(model, ref_model)
 
 
 def test_pissa_inference():
-    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
-    tokenizer_module = load_tokenizer(model_args)
-    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
-
-    base_model = AutoModelForCausalLM.from_pretrained(
-        TINY_LLAMA_PISSA, torch_dtype=torch.float16, device_map=get_current_device()
-    )
-    ref_model: "LoraModel" = PeftModel.from_pretrained(base_model, TINY_LLAMA_PISSA, subfolder="pissa_init")
+    model = load_infer_model(**INFER_ARGS)
+    ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=False)
     ref_model = ref_model.merge_and_unload()
     compare_model(model, ref_model)