|
720 | 720 | " AIMessage(content='yes!', additional_kwargs={}, response_metadata={})]"
|
721 | 721 | ]
|
722 | 722 | },
|
723 |
| - "execution_count": 23, |
| 723 | + "execution_count": 109, |
724 | 724 | "metadata": {},
|
725 | 725 | "output_type": "execute_result"
|
726 | 726 | }
|
|
771 | 771 | "\n",
|
772 | 772 | "\n",
|
773 | 773 | "def call_model(state: State):\n",
|
| 774 | + " print(f\"Messages before trimming: {len(state['messages'])}\")\n", |
774 | 775 | " # highlight-start\n",
|
775 | 776 | " trimmed_messages = trimmer.invoke(state[\"messages\"])\n",
|
| 777 | + " print(f\"Messages after trimming: {len(trimmed_messages)}\")\n", |
| 778 | + " print(\"Remaining messages:\")\n", |
| 779 | + " for msg in trimmed_messages:\n", |
| 780 | + " print(f\" {type(msg).__name__}: {msg.content}\")\n", |
776 | 781 | " prompt = prompt_template.invoke(\n",
|
777 | 782 | " {\"messages\": trimmed_messages, \"language\": state[\"language\"]}\n",
|
778 | 783 | " )\n",
|
|
792 | 797 | "cell_type": "markdown",
|
793 | 798 | "metadata": {},
|
794 | 799 | "source": [
|
795 |
| - "Now if we try asking the model our name, it won't know it since we trimmed that part of the chat history:" |
| 800 | + "Now if we try asking the model our name, it won't know it since we trimmed that part of the chat history. (By defining our trim stragegy as `'last'`, we are only keeping the most recent messages that fit within the `max_tokens`.)" |
796 | 801 | ]
|
797 | 802 | },
|
798 | 803 | {
|
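For context on the `'last'` strategy mentioned in that markdown cell: the tutorial's trimmer is built with `trim_messages` from `langchain_core.messages`, configured outside this diff. The sketch below is not part of the PR; it illustrates how `strategy="last"` keeps only the most recent messages under the budget. To stay runnable without a model, it uses `token_counter=len` (each message counts as one "token") and an illustrative `max_tokens`, whereas the notebook's trimmer presumably counts real tokens against the chat model.

```python
# Standalone sketch of the "last" trim strategy (illustrative, not the PR's code).
# token_counter=len makes each message count as one token, so no model is needed.
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, trim_messages

messages = [
    SystemMessage(content="you're a good assistant"),
    HumanMessage(content="hi! I'm bob"),
    AIMessage(content="hi!"),
    HumanMessage(content="whats 2 + 2"),
    AIMessage(content="4"),
    HumanMessage(content="having fun?"),
    AIMessage(content="yes!"),
]

trimmed = trim_messages(
    messages,
    strategy="last",      # keep the most recent messages that fit the budget
    max_tokens=5,         # illustrative budget: 5 "tokens" == 5 messages here
    token_counter=len,
    include_system=True,  # always keep the system message
    start_on="human",     # trimmed history should begin with a human turn
)

for msg in trimmed:
    print(f"{type(msg).__name__}: {msg.content}")
```

With this configuration the earliest human/AI exchange ("hi! I'm bob" / "hi!") is dropped, which is why later questions about the user's name can no longer be answered.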
|
804 | 809 | "name": "stdout",
|
805 | 810 | "output_type": "stream",
|
806 | 811 | "text": [
|
| 812 | + "Messages before trimming: 12\n", |
| 813 | + "Messages after trimming: 8\n", |
| 814 | + "Remaining messages:\n", |
| 815 | + " SystemMessage: you're a good assistant\n", |
| 816 | + " HumanMessage: whats 2 + 2\n", |
| 817 | + " AIMessage: 4\n", |
| 818 | + " HumanMessage: thanks\n", |
| 819 | + " AIMessage: no problem!\n", |
| 820 | + " HumanMessage: having fun?\n", |
| 821 | + " AIMessage: yes!\n", |
| 822 | + " HumanMessage: What is my name?\n", |
807 | 823 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
808 | 824 | "\n",
|
809 |
| - "I don't know your name. You haven't told me yet!\n" |
| 825 | + "I don't know your name. If you'd like to share it, feel free!\n" |
810 | 826 | ]
|
811 | 827 | }
|
812 | 828 | ],
|
|
840 | 856 | "name": "stdout",
|
841 | 857 | "output_type": "stream",
|
842 | 858 | "text": [
|
| 859 | + "Messages before trimming: 12\n", |
| 860 | + "Messages after trimming: 8\n", |
| 861 | + "Remaining messages:\n", |
| 862 | + " SystemMessage: you're a good assistant\n", |
| 863 | + " HumanMessage: whats 2 + 2\n", |
| 864 | + " AIMessage: 4\n", |
| 865 | + " HumanMessage: thanks\n", |
| 866 | + " AIMessage: no problem!\n", |
| 867 | + " HumanMessage: having fun?\n", |
| 868 | + " AIMessage: yes!\n", |
| 869 | + " HumanMessage: What math problem was asked?\n", |
843 | 870 | "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
844 | 871 | "\n",
|
845 |
| - "You asked what 2 + 2 equals.\n" |
| 872 | + "The math problem that was asked was \"what's 2 + 2.\"\n" |
846 | 873 | ]
|
847 | 874 | }
|
848 | 875 | ],
|
849 | 876 | "source": [
|
850 | 877 | "config = {\"configurable\": {\"thread_id\": \"abc678\"}}\n",
|
851 |
| - "query = \"What math problem did I ask?\"\n", |
| 878 | + "\n", |
| 879 | + "query = \"What math problem was asked?\"\n", |
852 | 880 | "language = \"English\"\n",
|
853 | 881 | "\n",
|
854 | 882 | "input_messages = messages + [HumanMessage(query)]\n",
|
|
890 | 918 | "text": [
|
891 | 919 | "|Hi| Todd|!| Here|’s| a| joke| for| you|:\n",
|
892 | 920 | "\n",
|
893 |
| - "|Why| don|’t| skeleton|s| fight| each| other|?\n", |
| 921 | + "|Why| don't| scientists| trust| atoms|?\n", |
894 | 922 | "\n",
|
895 |
| - "|Because| they| don|’t| have| the| guts|!||" |
| 923 | + "|Because| they| make| up| everything|!||" |
896 | 924 | ]
|
897 | 925 | }
|
898 | 926 | ],
|
|