Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Deprecate type='tuples' for chatbot and focus chatbot docs on 'messages' type #9194

Merged
merged 15 commits into from
Aug 29, 2024
6 changes: 6 additions & 0 deletions .changeset/deep-bananas-switch.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
"gradio": minor
"website": minor
---

feat: Deprecate type='tuples' for chatbot and focus chatbot docs on 'messages' type
2 changes: 1 addition & 1 deletion demo/chatbot_core_components/run.ipynb

Large diffs are not rendered by default.

35 changes: 19 additions & 16 deletions demo/chatbot_core_components/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,57 +148,60 @@ def random_matplotlib_plot():

def add_message(history, message):
for x in message["files"]:
history.append(((x,), None))
history.append({"role": "user", "content": {"path": x}})
if message["text"] is not None:
history.append((message["text"], None))
history.append({"role": "user", "content": message["text"]})
return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history, response_type):
msg = {"role": "assistant", "content": ""}
if response_type == "plot":
history[-1][1] = gr.Plot(random_plot())
content = gr.Plot(random_plot())
elif response_type == "bokeh_plot":
history[-1][1] = gr.Plot(random_bokeh_plot())
content = gr.Plot(random_bokeh_plot())
elif response_type == "matplotlib_plot":
history[-1][1] = gr.Plot(random_matplotlib_plot())
content = gr.Plot(random_matplotlib_plot())
elif response_type == "gallery":
history[-1][1] = gr.Gallery(
content = gr.Gallery(
[os.path.join("files", "avatar.png"), os.path.join("files", "avatar.png")]
)
elif response_type == "image":
history[-1][1] = gr.Image(os.path.join("files", "avatar.png"))
content = gr.Image(os.path.join("files", "avatar.png"))
elif response_type == "video":
history[-1][1] = gr.Video(os.path.join("files", "world.mp4"))
content = gr.Video(os.path.join("files", "world.mp4"))
elif response_type == "audio":
history[-1][1] = gr.Audio(os.path.join("files", "audio.wav"))
content = gr.Audio(os.path.join("files", "audio.wav"))
elif response_type == "audio_file":
history[-1][1] = (os.path.join("files", "audio.wav"), "description")
content = {"path": os.path.join("files", "audio.wav"), "alt_text": "description"}
elif response_type == "image_file":
history[-1][1] = (os.path.join("files", "avatar.png"), "description")
content = {"path": os.path.join("files", "avatar.png"), "alt_text": "description"}
elif response_type == "video_file":
history[-1][1] = (os.path.join("files", "world.mp4"), "description")
content = {"path": os.path.join("files", "world.mp4"), "alt_text": "description"}
elif response_type == "txt_file":
history[-1][1] = (os.path.join("files", "sample.txt"), "description")
content = {"path": os.path.join("files", "sample.txt"), "alt_text": "description"}
elif response_type == "html":
history[-1][1] = gr.HTML(
content = gr.HTML(
html_src(random.choice(["harmful", "neutral", "beneficial"]))
)
else:
history[-1][1] = txt
content = txt
msg["content"] = content # type: ignore
history.append(msg)
return history

fig = random_plot()

with gr.Blocks(fill_height=True) as demo:
chatbot = gr.Chatbot(
elem_id="chatbot",
type="messages",
bubble_full_width=False,
scale=1,
show_copy_button=True,
avatar_images=(
None, # os.path.join("files", "avatar.png"),
os.path.join("files", "avatar.png"),
),
# layout="panel",
)
response_type = gr.Radio(
[
Expand Down
2 changes: 1 addition & 1 deletion demo/chatbot_multimodal/run.ipynb
Original file line number Diff line number Diff line change
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/avatar.png\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/messages_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import plotly.express as px\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n", "\n", "def random_plot():\n", " df = px.data.iris()\n", " fig = px.scatter(df, x=\"sepal_width\", y=\"sepal_length\", color=\"species\",\n", " size='petal_length', hover_data=['petal_width'])\n", " return fig\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history):\n", " history[-1][1] = \"Cool!\"\n", " return history\n", "\n", "fig = random_plot()\n", "\n", "with gr.Blocks(fill_height=True) as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " scale=1,\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\", show_label=False)\n", "\n", " chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/avatar.png\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/tuples_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append({\"role\": \"user\", \"content\": {\"path\": x}})\n", " if message[\"text\"] is not None:\n", " history.append({\"role\": \"user\", \"content\": message[\"text\"]})\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history: list):\n", " response = \"**That's cool!**\"\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in response:\n", " history[-1]['content'] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " type=\"messages\"\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\", show_label=False)\n", "\n", " chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
28 changes: 12 additions & 16 deletions demo/chatbot_multimodal/run.py
Original file line number Diff line number Diff line change
@@ -1,35 +1,31 @@
import gradio as gr
import plotly.express as px
import time

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.

def random_plot():
df = px.data.iris()
fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species",
size='petal_length', hover_data=['petal_width'])
return fig

def print_like_dislike(x: gr.LikeData):
print(x.index, x.value, x.liked)

def add_message(history, message):
for x in message["files"]:
history.append(((x,), None))
history.append({"role": "user", "content": {"path": x}})
if message["text"] is not None:
history.append((message["text"], None))
history.append({"role": "user", "content": message["text"]})
return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history):
history[-1][1] = "Cool!"
return history

fig = random_plot()
def bot(history: list):
response = "**That's cool!**"
history.append({"role": "assistant", "content": ""})
for character in response:
history[-1]['content'] += character
time.sleep(0.05)
yield history

with gr.Blocks(fill_height=True) as demo:
with gr.Blocks() as demo:
chatbot = gr.Chatbot(
elem_id="chatbot",
bubble_full_width=False,
scale=1,
type="messages"
)

chat_input = gr.MultimodalTextbox(interactive=True,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,32 +1,35 @@
import gradio as gr
import time
import plotly.express as px

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.

def random_plot():
df = px.data.iris()
fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species",
size='petal_length', hover_data=['petal_width'])
return fig

def print_like_dislike(x: gr.LikeData):
print(x.index, x.value, x.liked)

def add_message(history, message):
for x in message["files"]:
history.append({"role": "user", "content": {"path": x}})
history.append(((x,), None))
if message["text"] is not None:
history.append({"role": "user", "content": message["text"]})
history.append((message["text"], None))
return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history: list):
response = "**That's cool!**"
history.append({"role": "assistant", "content": ""})
for character in response:
history[-1]['content'] += character
time.sleep(0.05)
yield history
def bot(history):
history[-1][1] = "Cool!"
return history

fig = random_plot()

with gr.Blocks() as demo:
with gr.Blocks(fill_height=True) as demo:
chatbot = gr.Chatbot(
[],
elem_id="chatbot",
bubble_full_width=False,
type="messages"
scale=1,
)

chat_input = gr.MultimodalTextbox(interactive=True,
Expand Down
2 changes: 1 addition & 1 deletion demo/chatbot_simple/run.ipynb
Original file line number Diff line number Diff line change
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_simple"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot()\n", " msg = gr.Textbox()\n", " clear = gr.ClearButton([msg, chatbot])\n", "\n", " def respond(message, chat_history):\n", " bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n", " chat_history.append((message, bot_message))\n", " time.sleep(2)\n", " return \"\", chat_history\n", "\n", " msg.submit(respond, [msg, chatbot], [msg, chatbot])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_simple"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(type=\"messages\")\n", " msg = gr.Textbox()\n", " clear = gr.ClearButton([msg, chatbot])\n", "\n", " def respond(message, chat_history):\n", " bot_message = random.choice([\"How are you?\", \"Today is a great day\", \"I'm very hungry\"])\n", " chat_history.append({\"role\": \"user\", \"content\": message})\n", " chat_history.append({\"role\": \"assistant\", \"content\": bot_message})\n", " time.sleep(2)\n", " return \"\", chat_history\n", "\n", " msg.submit(respond, [msg, chatbot], [msg, chatbot])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
7 changes: 4 additions & 3 deletions demo/chatbot_simple/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,14 @@
import time

with gr.Blocks() as demo:
chatbot = gr.Chatbot()
chatbot = gr.Chatbot(type="messages")
msg = gr.Textbox()
clear = gr.ClearButton([msg, chatbot])

def respond(message, chat_history):
bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
chat_history.append((message, bot_message))
bot_message = random.choice(["How are you?", "Today is a great day", "I'm very hungry"])
chat_history.append({"role": "user", "content": message})
chat_history.append({"role": "assistant", "content": bot_message})
time.sleep(2)
return "", chat_history

Expand Down
2 changes: 1 addition & 1 deletion demo/chatbot_streaming/run.ipynb
Original file line number Diff line number Diff line change
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_streaming"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_streaming/testcase_messages.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot()\n", " msg = gr.Textbox()\n", " clear = gr.Button(\"Clear\")\n", "\n", " def user(user_message, history):\n", " return \"\", history + [[user_message, None]]\n", "\n", " def bot(history):\n", " bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n", " history[-1][1] = \"\"\n", " for character in bot_message:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", " msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " clear.click(lambda: None, None, chatbot, queue=False)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_streaming"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(type=\"messages\")\n", " msg = gr.Textbox()\n", " clear = gr.Button(\"Clear\")\n", "\n", " def user(user_message, history: list):\n", " return \"\", history + [{\"role\": \"user\", \"content\": user_message}]\n", "\n", " def bot(history: list):\n", " bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in bot_message:\n", " history[-1]['content'] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", " msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " clear.click(lambda: None, None, chatbot, queue=False)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
12 changes: 6 additions & 6 deletions demo/chatbot_streaming/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,18 +3,18 @@
import time

with gr.Blocks() as demo:
chatbot = gr.Chatbot()
chatbot = gr.Chatbot(type="messages")
msg = gr.Textbox()
clear = gr.Button("Clear")

def user(user_message, history):
return "", history + [[user_message, None]]
def user(user_message, history: list):
return "", history + [{"role": "user", "content": user_message}]

def bot(history):
def bot(history: list):
bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
history[-1][1] = ""
history.append({"role": "assistant", "content": ""})
for character in bot_message:
history[-1][1] += character
history[-1]['content'] += character
time.sleep(0.05)
yield history

Expand Down
27 changes: 0 additions & 27 deletions demo/chatbot_streaming/testcase_messages.py

This file was deleted.

Loading
Loading