ZHZisZZ committed on
Commit
f89178d
·
1 Parent(s): 762ba08
cua_lite/data/interfaces/_qwen3_vl.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ import json
3
+ import copy
4
+ from typing import Callable, Any
5
+
6
+ from cua_lite.data.interfaces.base import UnrolledContextDataInterface
7
+
8
+
9
+ DESCRIPTION_PROMPT = """Use a mouse and keyboard to interact with a computer, and take screenshots.
10
+ * This is an interface to a desktop GUI. You do not have access to a terminal or applications menu. You must click on desktop icons to start applications.
11
+ * Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions. For example, if you click on Firefox and a window does not open, try waiting and then taking another screenshot.
12
+ * The screen resolution is 1000x1000.
13
+ * Whenever you intend to move the cursor to click on an element such as an icon, consult a screenshot first to determine the element’s coordinates before moving the cursor.
14
+ * If you tried clicking on a program or link but it failed to load even after waiting, adjust your cursor position so that the tip of the cursor visually falls on the element you want to click.
15
+ * Make sure to click any buttons, links, icons, or other elements with the cursor tip in the center of the element. Do not click on edges unless explicitly instructed.\
16
+ """
17
+
18
+ ACTION_DESCRIPTION_PROMPT = """\
19
+ * `key`: Performs key down presses on the arguments passed in order, then performs key releases in reverse order.
20
+ * `type`: Type a string of text on the keyboard.
21
+ * `mouse_move`: Move the cursor to a specified (x, y) pixel coordinate on the screen.
22
+ * `left_click`: Click the left mouse button at a specified (x, y) pixel coordinate on the screen.
23
+ * `left_click_drag`: Click and drag the cursor to a specified (x, y) pixel coordinate on the screen.
24
+ * `right_click`: Click the right mouse button at a specified (x, y) pixel coordinate on the screen.
25
+ * `middle_click`: Click the middle mouse button at a specified (x, y) pixel coordinate on the screen.
26
+ * `double_click`: Double-click the left mouse button at a specified (x, y) pixel coordinate on the screen.
27
+ * `triple_click`: Triple-click the left mouse button at a specified (x, y) pixel coordinate on the screen (simulated as double-click since it's the closest action).
28
+ * `scroll`: Performs a scroll of the mouse scroll wheel.
29
+ * `hscroll`: Performs a horizontal scroll (mapped to regular scroll).
30
+ * `wait`: Wait specified seconds for the change to happen.
31
+ * `terminate`: Terminate the current task and report its completion status.
32
+ * `answer`: Answer a question.\
33
+ """
34
+
35
+ TOOLS_DEF = {
36
+ "type": "function",
37
+ "function": {
38
+ "name_for_human": "computer_use",
39
+ "name": "computer_use",
40
+ "description": DESCRIPTION_PROMPT,
41
+ "parameters": {
42
+ "properties": {
43
+ "action": {
44
+ "description": ACTION_DESCRIPTION_PROMPT,
45
+ "enum": [
46
+ "key",
47
+ "type",
48
+ "mouse_move",
49
+ "left_click",
50
+ "left_click_drag",
51
+ "right_click",
52
+ "middle_click",
53
+ "double_click",
54
+ "scroll",
55
+ "wait",
56
+ "terminate",
57
+ ],
58
+ "type": "string",
59
+ },
60
+ "keys": {
61
+ "description": "Required only by `action=key`.",
62
+ "type": "array",
63
+ },
64
+ "text": {
65
+ "description": "Required only by `action=type`.",
66
+ "type": "string",
67
+ },
68
+ "coordinate": {
69
+ "description": "The x,y coordinates for mouse actions.",
70
+ "type": "array",
71
+ },
72
+ "pixels": {"description": "The amount of scrolling.", "type": "number"},
73
+ "time": {"description": "The seconds to wait.", "type": "number"},
74
+ "status": {
75
+ "description": "The status of the task.",
76
+ "type": "string",
77
+ "enum": ["success", "failure"],
78
+ },
79
+ },
80
+ "required": ["action"],
81
+ "type": "object",
82
+ },
83
+ "args_format": "Format the arguments as a JSON object.",
84
+ },
85
+ }
86
+
87
+ SYSTEM_PROMPT = (
88
+ """# Tools
89
+
90
+ You may call one or more functions to assist with the user query.
91
+
92
+ You are provided with function signatures within <tools></tools> XML tags:
93
+ <tools>
94
+ """
95
+ + json.dumps(TOOLS_DEF)
96
+ + """
97
+ </tools>
98
+
99
+ For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
100
+ <tool_call>
101
+ {"name": <function-name>, "arguments": <args-json-object>}
102
+ </tool_call>
103
+
104
+ # Response format
105
+
106
+ Response format for every step:
107
+ 1) Action: a short imperative describing what to do in the UI.
108
+ 2) A single <tool_call>...</tool_call> block containing only the JSON: {"name": <function-name>, "arguments": <args-json-object>}.
109
+
110
+ Rules:
111
+ - Output exactly in the order: Action, <tool_call>.
112
+ - Be brief: one sentence for Action.
113
+ - Do not output anything else outside those parts.
114
+ - If finishing, use action=terminate in the tool call."""
115
+ )
116
+
117
+
118
+ INSTRUCTION_PROMPT = """
119
+ Please generate the next move according to the UI screenshot, instruction and previous actions.
120
+
121
+ Instruction: {instruction}
122
+
123
+ Previous actions:
124
+ {previous_actions}"""
125
+
126
+
127
@dataclasses.dataclass
class Qwen3VLDataInterface(UnrolledContextDataInterface):
    """Re-packages an unrolled trajectory into the Qwen3-VL chat format.

    The last ``history_n`` user/assistant steps (plus the current step) are
    kept verbatim; any older steps are dropped and summarized as one-line
    ``Step i: <action>`` entries inside the instruction prompt of the first
    kept ("anchor") user turn.
    """

    # Number of full history steps kept in addition to the current step.
    history_n: int = 4
    # Prepend the tool-definition SYSTEM_PROMPT as a system message.
    add_system_prompt: bool = True

    def process_context(self, row: dict[str, Any]) -> dict[str, Any]:
        """Rewrite ``row["messages"]`` into a truncated Qwen3-VL context.

        Args:
            row: A dict with a ``messages`` list of alternating user/assistant
                chat messages (the final assistant message may be absent).

        Returns:
            ``{"messages": [...]}`` with at most ``history_n + 1`` steps, an
            optional system message, the instruction prompt injected into the
            anchor user turn, and ``"Action: "`` prepended to each assistant
            turn. Text items are stripped from non-anchor user turns so only
            screenshots remain there.
        """
        messages = row["messages"]

        # 1. Group messages into (user, assistant) steps. Assumes strict
        #    user/assistant alternation; only the last step may lack an
        #    assistant reply.
        steps = []
        for i in range(0, len(messages), 2):
            step = {
                "user": messages[i],
                "assistant": messages[i + 1] if i + 1 < len(messages) else None,
            }
            steps.append(step)

        total_steps = len(steps)

        # 2. Decide how many leading steps to truncate. When there are not
        #    enough steps, truncate_count is 0 (anchor = step 1) but the
        #    instruction prompt is still rewritten below.
        steps_to_keep = self.history_n + 1
        truncate_count = max(0, total_steps - steps_to_keep)

        # 3. The task instruction lives in the text item of the first user turn.
        step1_content = steps[0]["user"]["content"]
        instruction_text = next(
            item["text"] for item in step1_content if item.get("type") == "text"
        )

        # 4. Summarize the truncated steps, one line per assistant action.
        #    Guard against a missing assistant message so a malformed step
        #    cannot raise here (mirrors the guards used for kept steps).
        summary_lines = []
        for i in range(truncate_count):
            assistant = steps[i]["assistant"]
            if assistant is None:
                continue
            content = assistant["content"]
            action = content if isinstance(content, str) else content[0]["text"]
            summary_lines.append(f"Step {i + 1}: {action}")

        summary_block = "\n".join(summary_lines) if summary_lines else "None"

        # Use the module-level template instead of duplicating the prompt text
        # inline (the previous hard-coded string was character-identical to
        # INSTRUCTION_PROMPT; keeping one copy prevents the two drifting apart).
        full_text_prompt = INSTRUCTION_PROMPT.format(
            instruction=instruction_text,
            previous_actions=summary_block,
        )

        # 5. Build the new message list.
        processed_messages = []
        if self.add_system_prompt:
            processed_messages.append(
                {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]}
            )

        # --- Anchor step (new starting point after truncation) ---
        anchor_step = steps[truncate_count]
        new_anchor_user = copy.deepcopy(anchor_step["user"])

        # Replace the anchor user's text item with the rebuilt prompt, or
        # append one if the turn had no text item.
        text_item = next(
            (item for item in new_anchor_user["content"] if item.get("type") == "text"),
            None,
        )
        if text_item:
            text_item["text"] = full_text_prompt
        else:
            new_anchor_user["content"].append(
                {"type": "text", "text": full_text_prompt}
            )

        processed_messages.append(new_anchor_user)
        if anchor_step["assistant"]:
            asst_msg = copy.deepcopy(anchor_step["assistant"])
            # Prepend "Action: " to match the response format in SYSTEM_PROMPT.
            content_node = asst_msg["content"][0]
            content_node["text"] = "Action: " + content_node["text"]
            processed_messages.append(asst_msg)

        # --- Remaining steps after the anchor ---
        for i in range(truncate_count + 1, total_steps):
            # Non-anchor user turns keep only their image items (the
            # instruction text lives solely in the anchor turn).
            u = copy.deepcopy(steps[i]["user"])
            if isinstance(u.get("content"), list):
                u["content"] = [it for it in u["content"] if it.get("type") != "text"]
            processed_messages.append(u)
            if steps[i]["assistant"]:
                asst_msg = copy.deepcopy(steps[i]["assistant"])
                # Prepend "Action: " to match the response format.
                content_node = asst_msg["content"][0]
                content_node["text"] = "Action: " + content_node["text"]
                processed_messages.append(asst_msg)

        return {"messages": processed_messages}
236
+
237
+
238
if __name__ == "__main__":
    # Manual smoke test: loads a local dataset shard and a local Qwen3-VL
    # processor, maps the interface over the data, and drops into a debugger
    # for interactive inspection. Requires the hard-coded local paths below.
    from datasets import load_from_disk
    from transformers import AutoProcessor
    from cua_lite.data.utils import clean_nones

    processor = AutoProcessor.from_pretrained(
        "/mnt/lustrenew/mllm_aligned/shared/models/huggingface/Qwen/Qwen3-VL-2B-Thinking"
    )

    dataset = load_from_disk(".data/unzipped/scalecua/ubuntu/shard-00000-of-00256")
    # clean_nones is applied lazily on access via the dataset transform.
    dataset.set_transform(clean_nones)

    # history_n=0 keeps only the current step; all prior steps are summarized.
    qwen3vl_interface = Qwen3VLDataInterface(history_n=0)
    dataset_mapped = dataset.map(
        qwen3vl_interface.process_context_batch, batched=True, batch_size=1000
    )

    # Render one processed conversation through the chat template, then stop
    # in the debugger to inspect `inputs` by hand.
    inputs = processor.apply_chat_template(dataset_mapped["messages"][1], return_dict=True)
    breakpoint()
cua_lite/data/interfaces/base.py CHANGED
@@ -4,31 +4,80 @@ from typing import Any
4
  from cua_lite.data.utils import batch_proc
5
 
6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  @dataclasses.dataclass
8
  class BaseDataInterface:
9
 
10
- # --- Grounding ---
11
- # def process_grounding(self, row: dict[str, Any]) -> dict[str, Any]:
12
- # return row
13
 
14
- # def process_grounding_batch(self, batch: dict[str, list[Any]]) -> dict[str, list[Any]]:
15
- # return batch_proc(self.process_grounding, batch)
 
 
 
16
 
17
- # --- Planning ---
18
- def process_action(self, row: dict[str, Any]) -> dict[str, Any]:
19
  return row
20
 
21
- def process_action_batch(self, batch: dict[str, list[Any]]) -> dict[str, list[Any]]:
22
- return batch_proc(self.process_action, batch)
 
 
 
 
23
 
24
- # --- Context ---
25
- def process_context(self, row: dict[str, Any]) -> dict[str, Any]:
 
 
 
 
26
  return row
27
 
28
- def process_context_batch(
29
- self, batch: dict[str, list[Any]]
30
- ) -> dict[str, list[Any]]:
31
- return batch_proc(self.process_context, batch)
 
 
 
 
32
 
33
 
34
  @dataclasses.dataclass
@@ -36,9 +85,8 @@ class UnrolledContextDataInterface(BaseDataInterface):
36
 
37
  """Useful for reasoning models"""
38
 
39
- def process_context_batch(
40
- self, batch: dict[str, list[Any]]
41
- ) -> dict[str, list[Any]]:
42
  """
43
  Handles 1-to-N data expansion (Unrolling conversation history).
44
  Note: This changes the number of rows.
@@ -51,10 +99,9 @@ class UnrolledContextDataInterface(BaseDataInterface):
51
  ]
52
  for assistant_index in assistant_indices:
53
  # Slicing includes the assistant message
54
- context = messages[: assistant_index + 1]
55
- processed_context = self.process_context({"messages": context})[
56
- "messages"
57
- ]
58
- messages_list.append(processed_context)
59
 
60
- return {"messages": messages_list}
 
4
  from cua_lite.data.utils import batch_proc
5
 
6
 
7
+ # @dataclasses.dataclass
8
+ # class BaseDataInterface:
9
+
10
+ # # -- Helpers (shared between grounding and trajectory) --
11
+ # def _add_system_prompt(self, row: dict[str, Any], system_prompt: str | None = None) -> dict[str, Any]:
12
+ # if system_prompt and row["messages"][0]["role"] != "system":
13
+ # row["messages"].insert(0, {"role": "system", "content": [{"type": "text", "text": system_prompt}]})
14
+ # return row
15
+
16
+ # def _process_action(self, row: dict[str, Any], **kwargs) -> dict[str, Any]:
17
+ # return row
18
+
19
+ # # -- Grounding ---
20
+ # def process_grounding(self, row: dict[str, Any], system_prompt: str | None = None, **kwargs) -> dict[str, Any]:
21
+ # return self._add_system_prompt(
22
+ # self._process_action(row, **kwargs),
23
+ # system_prompt=system_prompt
24
+ # )
25
+
26
+ # def process_grounding_batch(self, batch: dict[str, list[Any]], **kwargs) -> dict[str, list[Any]]:
27
+ # return batch_proc(self.process_grounding, batch, **kwargs)
28
+
29
+ # # --- Trajectory ---
30
+ # def _process_context(self, row: dict[str, Any], **kwargs) -> dict[str, Any]:
31
+ # return row
32
+
33
+ # def process_trajectory(self, row: dict[str, Any], system_prompt: str | None = None, **kwargs) -> dict[str, Any]:
34
+ # return self._add_system_prompt(
35
+ # self._process_context(self._process_action(row, **kwargs), **kwargs),
36
+ # system_prompt=system_prompt
37
+ # )
38
+
39
+ # def process_trajectory_batch(self, batch: dict[str, list[Any]], **kwargs) -> dict[str, list[Any]]:
40
+ # return batch_proc(self.process_trajectory, batch, **kwargs)
41
+
42
  @dataclasses.dataclass
43
  class BaseDataInterface:
44
 
45
+ grounding_system_prompt: str | None = None
46
+ trajectory_system_prompt: str | None = None
 
47
 
48
+ # -- Helpers (shared between grounding and trajectory) --
49
+ def _add_system_prompt(self, row: dict[str, Any], system_prompt: str | None = None) -> dict[str, Any]:
50
+ if system_prompt and row["messages"][0]["role"] != "system":
51
+ row["messages"].insert(0, {"role": "system", "content": [{"type": "text", "text": system_prompt}]})
52
+ return row
53
 
54
+ def _process_action(self, row: dict[str, Any], **kwargs) -> dict[str, Any]:
55
+ # TODO: overwrite this in subclass
56
  return row
57
 
58
+ # -- Grounding ---
59
+ def process_grounding(self, row: dict[str, Any], **kwargs) -> dict[str, Any]:
60
+ return self._add_system_prompt(
61
+ self._process_action(row, **kwargs),
62
+ system_prompt=self.grounding_system_prompt
63
+ )
64
 
65
+ def process_grounding_batch(self, batch: dict[str, list[Any]], **kwargs) -> dict[str, list[Any]]:
66
+ return batch_proc(self.process_grounding, batch, **kwargs)
67
+
68
+ # --- Trajectory ---
69
+ def _process_context(self, row: dict[str, Any], **kwargs) -> dict[str, Any]:
70
+ # TODO: overwrite this in subclass
71
  return row
72
 
73
+ def process_trajectory(self, row: dict[str, Any], **kwargs) -> dict[str, Any]:
74
+ return self._add_system_prompt(
75
+ self._process_context(self._process_action(row, **kwargs), **kwargs),
76
+ system_prompt=self.trajectory_system_prompt
77
+ )
78
+
79
+ def process_trajectory_batch(self, batch: dict[str, list[Any]], **kwargs) -> dict[str, list[Any]]:
80
+ return batch_proc(self.process_trajectory, batch, **kwargs)
81
 
82
 
83
  @dataclasses.dataclass
 
85
 
86
  """Useful for reasoning models"""
87
 
88
+ def process_trajectory_batch(
89
+ self, batch: dict[str, list[Any]], **kwargs) -> dict[str, list[Any]]:
 
90
  """
91
  Handles 1-to-N data expansion (Unrolling conversation history).
92
  Note: This changes the number of rows.
 
99
  ]
100
  for assistant_index in assistant_indices:
101
  # Slicing includes the assistant message
102
+ processed_messages = self.process_trajectory({"messages": messages[: assistant_index + 1]}, **kwargs)["messages"]
103
+ messages_list.append(processed_messages)
104
+
105
+ batch = {"messages": messages_list}
 
106
 
107
+ return batch
cua_lite/data/interfaces/qwen3_vl/interfaces.py CHANGED
@@ -11,121 +11,123 @@ from cua_lite.data.interfaces.qwen3_vl.prompts import (
11
  MOBILE_SYSTEM_PROMPT,
12
  )
13
 
 
 
 
 
 
 
 
 
14
 
15
  @dataclasses.dataclass
16
- class Qwen3VLDataInterface(UnrolledContextDataInterface):
17
 
18
  history_n: int = 4
19
 
20
- def process_context(self, row: dict[str, Any]) -> dict[str, Any]:
21
- # 1. 基础拆分
22
- messages = row["messages"]
23
 
24
- # 2. Step 分组 (User + Assistant)
25
- steps = []
26
- for i in range(0, len(messages), 2):
27
- step = {
28
- "user": messages[i],
29
- "assistant": messages[i + 1] if i + 1 < len(messages) else None,
30
- }
31
- steps.append(step)
32
 
33
- total_steps = len(steps)
34
-
35
- # 3. 计算保留逻辑
36
  steps_to_keep = self.history_n + 1
 
37
 
38
- # ======== FIX 1: 不要在这里 return ========
39
- # 如果总步数不够截断,就令 truncate_count=0(anchor=Step1),但仍然重写 INSTR_PROMPT
40
- truncate_count = max(0, total_steps - steps_to_keep)
41
- # =========================================
42
-
43
- # 4. 提取 Step 1 的指令
44
- step1_content = steps[0]["user"]["content"]
45
- instruction_text = next(
46
- item["text"] for item in step1_content if item.get("type") == "text"
47
  )
48
 
49
- # if "Previous actions" in instruction_text:
50
- # instruction_text = instruction_text.split("Previous actions")[0].strip()
51
-
52
- # # 规范成 “纯 instruction 内容”
53
- # if instruction_text.startswith("Instruction:"):
54
- # instruction_text = instruction_text[len("Instruction:"):].strip()
55
-
56
- # 5. 生成历史摘要(被截断掉的那些步)
57
- summary_lines = []
58
- for i in range(truncate_count):
59
- action = (
60
- steps[i]["assistant"]["content"]
61
- if isinstance(steps[i]["assistant"]["content"], str)
62
- else steps[i]["assistant"]["content"][0]["text"]
63
- )
64
- summary_lines.append(f"Step {i+1}: {action}")
65
-
66
  summary_block = "\n".join(summary_lines) if summary_lines else "None"
67
 
68
- # 生成与 QwenAgent 一致的 INSTR_PROMPT
69
- # full_text_prompt = (
70
- # "\nPlease generate the next move according to the UI screenshot, instruction and previous actions.\n\n"
71
- # f"Instruction: {instruction_text}\n\n"
72
- # "Previous actions:\n"
73
- # f"{summary_block}"
74
- # )
75
- full_text_prompt = INSTRUCTION_PROMPT.format(
76
- instruction=instruction_text,
77
- previous_actions=summary_block,
78
  )
79
 
80
- # 6. 构建新消息列表
81
  processed_messages = []
82
- if self.add_system_prompt:
83
- processed_messages.append(
84
- {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]}
85
- )
86
-
87
- # --- Anchor Step (新起点) ---
88
- anchor_step = steps[truncate_count]
89
- new_anchor_user = copy.deepcopy(anchor_step["user"])
90
-
91
- # 找到 User 消息里的 text 字段并修改,或者追加
92
- text_item = next(
93
- (item for item in new_anchor_user["content"] if item.get("type") == "text"),
94
- None,
95
- )
96
- if text_item:
97
- text_item["text"] = full_text_prompt
98
- else:
99
- new_anchor_user["content"].append(
100
- {"type": "text", "text": full_text_prompt}
101
- )
102
-
103
- processed_messages.append(new_anchor_user)
104
- if anchor_step["assistant"]:
105
- asst_msg = copy.deepcopy(anchor_step["assistant"])
106
- # Prepend "Action: " using a clean reference
107
- content_node = asst_msg["content"][0]
108
- content_node["text"] = "Action: " + content_node["text"]
109
- processed_messages.append(asst_msg)
110
-
111
- # --- 追加剩余步骤 ---
112
- for i in range(truncate_count + 1, total_steps):
113
- # ======== FIX 2: 对齐 QwenAgent:非 anchor 的 user 只保留 image 部分 ========
114
- u = copy.deepcopy(steps[i]["user"])
115
- if isinstance(u.get("content"), list):
116
- u["content"] = [it for it in u["content"] if it.get("type") != "text"]
117
- processed_messages.append(u)
118
- # ================================================================
119
- if steps[i]["assistant"]:
120
- asst_msg = copy.deepcopy(steps[i]["assistant"])
121
- # Prepend "Action: " using a clean reference
122
- content_node = asst_msg["content"][0]
123
- content_node["text"] = "Action: " + content_node["text"]
124
- processed_messages.append(asst_msg)
125
 
126
  return {"messages": processed_messages}
127
 
128
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
  if __name__ == "__main__":
130
  from datasets import load_from_disk
131
  from transformers import AutoProcessor
 
11
  MOBILE_SYSTEM_PROMPT,
12
  )
13
 
14
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V1 = ""
15
+
16
+ MOBILE_GROUNDING_SYSTEM_PROMPT_V1 = ""
17
+ MOBILE_SYSTEM_PROMPT_V1 = ""
18
+
19
+ MOBILE_GROUNDING_SYSTEM_PROMPT_V0 = ""
20
+ MOBILE_SYSTEM_PROMPT_V0 = ""
21
+
22
 
23
  @dataclasses.dataclass
24
+ class Qwen3VLBaseDataInterface(UnrolledContextDataInterface):
25
 
26
  history_n: int = 4
27
 
28
+ def _process_context(self, row: dict[str, Any], **kwargs) -> dict[str, Any]:
29
+ messages = copy.deepcopy(row["messages"])
 
30
 
31
+ # 1. Group User/Assistant pairs
32
+ steps = [
33
+ {"user": messages[i], "assistant": messages[i + 1] if i + 1 < len(messages) else None}
34
+ for i in range(0, len(messages), 2)
35
+ ]
 
 
 
36
 
37
+ # 2. Calculate truncation
 
 
38
  steps_to_keep = self.history_n + 1
39
+ truncate_count = max(0, len(steps) - steps_to_keep)
40
 
41
+ # 3. Extract Instruction (from Step 1) and create Summary
42
+ step1_text = next(
43
+ item["text"] for item in steps[0]["user"]["content"] if item.get("type") == "text"
 
 
 
 
 
 
44
  )
45
 
46
+ summary_lines = [
47
+ f"Step {i+1}: {step['assistant']['content'][0]['text']}"
48
+ for i, step in enumerate(steps[:truncate_count])
49
+ if step["assistant"]
50
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
51
  summary_block = "\n".join(summary_lines) if summary_lines else "None"
52
 
53
+ full_prompt = INSTRUCTION_PROMPT.format(
54
+ instruction=step1_text, previous_actions=summary_block
 
 
 
 
 
 
 
 
55
  )
56
 
57
+ # 4. Build new message list from remaining steps
58
  processed_messages = []
59
+ for i, step in enumerate(steps[truncate_count:]):
60
+ u_msg, a_msg = step["user"], step["assistant"]
61
+
62
+ # Handle User Message
63
+ if i == 0: # Anchor step: Inject full prompt
64
+ text_item = next((x for x in u_msg["content"] if x.get("type") == "text"), None)
65
+ if text_item:
66
+ text_item["text"] = full_prompt
67
+ else:
68
+ u_msg["content"].append({"type": "text", "text": full_prompt})
69
+ else: # Subsequent steps: Remove text, keep images only
70
+ u_msg["content"] = [x for x in u_msg["content"] if x.get("type") != "text"]
71
+
72
+ processed_messages.append(u_msg)
73
+
74
+ # Handle Assistant Message
75
+ if a_msg:
76
+ # Prepend "Action: " to the first content block
77
+ a_msg["content"][0]["text"] = "Action: " + a_msg["content"][0]["text"]
78
+ processed_messages.append(a_msg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
  return {"messages": processed_messages}
81
 
82
 
83
+ @dataclasses.dataclass
84
+ class Qwen3VLDesktopDataInterfaceV0(Qwen3VLBaseDataInterface):
85
+ grounding_system_prompt: str = DESKTOP_GROUNDING_SYSTEM_PROMPT_V0
86
+ trajectory_system_prompt: str = DESKTOP_TRAJECTORY_SYSTEM_PROMPT_V0
87
+
88
+ def _process_action(
89
+ self,
90
+ row: dict[str, Any],
91
+ ) -> dict[str, Any]:
92
+ # TODO: debug
93
+ messages = copy.deepcopy(row["messages"])
94
+ for message in messages:
95
+ if message["role"] == "assistant":
96
+ # TODO: pass tool calling results
97
+ if message["tool_calls"]["function"]["arguments"]["action"] == "hscroll":
98
+ message["tool_calls"]["function"]["arguments"]["action"] = "scroll"
99
+ elif message["tool_calls"]["function"]["arguments"]["action"] == "triple_click":
100
+ message["tool_calls"]["function"]["arguments"]["action"] = "double_click"
101
+ return {"messages": messages}
102
+
103
+
104
+ @dataclasses.dataclass
105
+ class Qwen3VLMobileDataInterfaceV0(Qwen3VLBaseDataInterface):
106
+
107
+ grounding_system_prompt: str = DESKTOP_GROUNDING_SYSTEM_PROMPT_V1
108
+ trajectory_system_prompt: str = DESKTOP_TRAJECTORY_SYSTEM_PROMPT_V1
109
+
110
+ def process_action(
111
+ self,
112
+ row: dict[str, Any],
113
+ ) -> dict[str, Any]:
114
+ # TODO: implement
115
+ pass
116
+
117
+
118
+ @dataclasses.dataclass
119
+ class Qwen3VLDesktopDataInterfaceV1(Qwen3VLBaseDataInterface):
120
+ grounding_system_prompt: str = DESKTOP_GROUNDING_SYSTEM_PROMPT_V1
121
+ trajectory_system_prompt: str = DESKTOP_SYSTEM_PROMPT_V1
122
+
123
+
124
+ @dataclasses.dataclass
125
+ class Qwen3VLMobileDataInterfaceV1(Qwen3VLBaseDataInterface):
126
+ grounding_system_prompt: str = MOBILE_GROUNDING_SYSTEM_PROMPT_V1
127
+ trajectory_system_prompt: str = MOBILE_SYSTEM_PROMPT_V1
128
+
129
+
130
+
131
  if __name__ == "__main__":
132
  from datasets import load_from_disk
133
  from transformers import AutoProcessor
cua_lite/data/interfaces/qwen3_vl/prompts/_desktop.py ADDED
@@ -0,0 +1,431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import json
3
+
4
+ # from .common import TRAJECTORY_SYSTEM_PROMPT_TEMPLATE, GROUNDING_SYSTEM_PROMPT_TEMPLATE
5
+ from cua_lite.data.interfaces.qwen3_vl.prompts.common import TRAJECTORY_SYSTEM_PROMPT_TEMPLATE, GROUNDING_SYSTEM_PROMPT_TEMPLATE
6
+
7
+ DESKTOP_TRAJECTORY_DESCRIPTION = """Use a mouse and keyboard to interact with a computer, and take screenshots.
8
+ * This is an interface to a desktop GUI. You do not have access to a terminal or applications menu. You must click on desktop icons to start applications.
9
+ * Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions. For example, if you click on Firefox and a window does not open, try waiting and then taking another screenshot.
10
+ * The screen resolution is 1000x1000.
11
+ * Whenever you intend to move the cursor to click on an element such as an icon, consult a screenshot first to determine the element's coordinates before moving the cursor.
12
+ * If you tried clicking on a program or link but it failed to load even after waiting, adjust your cursor position so that the tip of the cursor visually falls on the element you want to click.
13
+ * Make sure to click any buttons, links, icons, or other elements with the cursor tip in the center of the element. Do not click on edges unless explicitly instructed."""
14
+
15
+ # mention point and bbox
16
+ DESKTOP_GROUNDING_DESCRIPTION = """Given a computer screenshot, map the natural language instruction to an action or map the natural language reference to a point or a bounding box in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
17
+ DESKTOP_GROUNDING_DESCRIPTION_ACTIONS = """Given a computer screenshot, map the natural language action to a point in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
18
+ DESKTOP_GROUNDING_DESCRIPTION_POINT = """Given a computer screenshot, map the natural language reference to a point in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
19
+ DESKTOP_GROUNDING_DESCRIPTION_BBOX = """Given a computer screenshot, map the natural language reference to a bounding box in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
20
+
21
+ # v0: trajectory (https://github.com/xlang-ai/OSWorld/blob/main/mm_agents/qwen3vl_agent.py)
22
+ # DESKTOP_TRAJECTORY_ACTION_DESCRIPTION_V0 = """\
23
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0 = {
24
+ "type": "function",
25
+ "function": {
26
+ "name_for_human": "desktop_use",
27
+ "name": "desktop_use",
28
+ "description": DESKTOP_TRAJECTORY_DESCRIPTION,
29
+ "parameters": {
30
+ "properties": {
31
+ "action": {
32
+ "description": (
33
+ "* `key`: Performs key down presses on the arguments passed in order, then performs key releases in reverse order.\n"
34
+ + "* `type`: Type a string of text on the keyboard.\n"
35
+ + "* `mouse_move`: Move the cursor to a specified (x, y) pixel coordinate on the screen.\n"
36
+ + "* `left_click`: Click the left mouse button at a specified (x, y) pixel coordinate on the screen.\n"
37
+ + "* `left_click_drag`: Click and drag the cursor to a specified (x, y) pixel coordinate on the screen.\n"
38
+ + "* `right_click`: Click the right mouse button at a specified (x, y) pixel coordinate on the screen.\n"
39
+ + "* `middle_click`: Click the middle mouse button at a specified (x, y) pixel coordinate on the screen.\n"
40
+ + "* `double_click`: Double-click the left mouse button at a specified (x, y) pixel coordinate on the screen.\n"
41
+ + "* `triple_click`: Triple-click the left mouse button at a specified (x, y) pixel coordinate on the screen (simulated as double-click since it's the closest action).\n"
42
+ + "* `scroll`: Performs a scroll of the mouse scroll wheel.\n"
43
+ + "* `hscroll`: Performs a horizontal scroll (mapped to regular scroll).\n"
44
+ + "* `wait`: Wait specified seconds for the change to happen.\n"
45
+ + "* `terminate`: Terminate the current task and report its completion status.\n"
46
+ + "* `answer`: Answer a question."
47
+ ),
48
+ "enum": [
49
+ "key",
50
+ "type",
51
+ "mouse_move",
52
+ "left_click",
53
+ "left_click_drag",
54
+ "right_click",
55
+ "middle_click",
56
+ "double_click",
57
+ "scroll",
58
+ "wait",
59
+ "terminate",
60
+ ],
61
+ "type": "string",
62
+ },
63
+ "keys": {
64
+ "description": "Required only by `action=key`.",
65
+ "type": "array",
66
+ },
67
+ "text": {
68
+ "description": "Required only by `action=type`.",
69
+ "type": "string",
70
+ },
71
+ "coordinate": {
72
+ "description": "The x,y coordinates for mouse actions.",
73
+ "type": "array",
74
+ },
75
+ "pixels": {"description": "The amount of scrolling.", "type": "number"},
76
+ "time": {"description": "The seconds to wait.", "type": "number"},
77
+ "status": {
78
+ "description": "The status of the task.",
79
+ "type": "string",
80
+ "enum": ["success", "failure"],
81
+ },
82
+ },
83
+ "required": ["action"],
84
+ "type": "object",
85
+ },
86
+ "args_format": "Format the arguments as a JSON object.",
87
+ },
88
+ }
89
+ DESKTOP_TRAJECTORY_SYSTEM_PROMPT_V0 = TRAJECTORY_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0))
90
+
91
+ # v0: grounding
92
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0_ACTIONS = copy.deepcopy(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0)
93
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0_ACTIONS["function"]["description"] = DESKTOP_GROUNDING_DESCRIPTION_ACTIONS
94
+
95
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V0_ACTIONS = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0))
96
+
97
# v0 grounding tool `point`: resolve a natural-language reference to the
# [x, y] center of the target element on screen.
DESKTOP_GROUNDING_TOOLS_DEFINITION_V0_POINT = {
    "type": "function",
    "function": {
        "name_for_human": "point",
        "name": "point",
        "description": DESKTOP_GROUNDING_DESCRIPTION_POINT,
        "parameters": {
            "properties": {
                "coordinate": {
                    "description": (
                        "A 2-element list [x, y] representing the screen coordinates"
                        " for the center of the described object."
                    ),
                    "type": "array",
                },
            },
            "required": ["coordinate"],
            "type": "object",
        },
        "args_format": "Format the arguments as a JSON object.",
    },
}
# Render the grounding system prompt with this schema inlined as JSON.
# Key order above is preserved deliberately: json.dumps keeps insertion order,
# so reordering keys would change the prompt string.
DESKTOP_GROUNDING_SYSTEM_PROMPT_V0_POINT = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(
    tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V0_POINT)
)
117
+
118
# v0 grounding tool `bbox`: resolve a natural-language reference to the
# axis-aligned bounding box of the target element on screen.
DESKTOP_GROUNDING_TOOLS_DEFINITION_V0_BBOX = {
    "type": "function",
    "function": {
        "name_for_human": "bbox",
        "name": "bbox",
        "description": DESKTOP_GROUNDING_DESCRIPTION_BBOX,
        "parameters": {
            "properties": {
                "coordinate": {
                    "description": (
                        "A 4-element list [x_min, y_min, x_max, y_max] defining the bounding box,"
                        " where (x_min, y_min) is the top-left corner and (x_max, y_max) is the"
                        " bottom-right corner."
                    ),
                    "type": "array",
                },
            },
            "required": ["coordinate"],
            "type": "object",
        },
        "args_format": "Format the arguments as a JSON object.",
    },
}
# Key order preserved: json.dumps keeps insertion order, so reordering would
# change the rendered prompt string.
DESKTOP_GROUNDING_SYSTEM_PROMPT_V0_BBOX = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(
    tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V0_BBOX)
)
138
+
139
+
140
# v0 combined grounding tool: `point`/`bbox` localization plus the full v0
# desktop action vocabulary, exposed as a single `desktop_grounding` function.
# NOTE(review): the description below lists `triple_click`, `hscroll` and
# `answer`, but the enum (kept from the v0/OSWorld-derived schema) omits
# them — confirm this asymmetry is intentional before changing either side.
DESKTOP_GROUNDING_TOOLS_DEFINITION_V0 = {
    "type": "function",
    "function": {
        "name_for_human": "desktop_grounding",
        "name": "desktop_grounding",
        "description": DESKTOP_GROUNDING_DESCRIPTION,
        "parameters": {
            "properties": {
                "action": {
                    # Adjacent string literals concatenate at compile time; the
                    # resulting text is identical to the previous "+"-joined form.
                    "description": (
                        "* `point`: Return the [x, y] center coordinates of the object described.\n"
                        "* `bbox`: Return the [x_min, y_min, x_max, y_max] bounding box of the object described.\n"
                        "* `key`: Performs key down presses on the arguments passed in order, then performs key releases in reverse order.\n"
                        "* `type`: Type a string of text on the keyboard.\n"
                        "* `mouse_move`: Move the cursor to a specified (x, y) pixel coordinate on the screen.\n"
                        "* `left_click`: Click the left mouse button at a specified (x, y) pixel coordinate on the screen.\n"
                        "* `left_click_drag`: Click and drag the cursor to a specified (x, y) pixel coordinate on the screen.\n"
                        "* `right_click`: Click the right mouse button at a specified (x, y) pixel coordinate on the screen.\n"
                        "* `middle_click`: Click the middle mouse button at a specified (x, y) pixel coordinate on the screen.\n"
                        "* `double_click`: Double-click the left mouse button at a specified (x, y) pixel coordinate on the screen.\n"
                        "* `triple_click`: Triple-click the left mouse button at a specified (x, y) pixel coordinate on the screen (simulated as double-click since it's the closest action).\n"
                        "* `scroll`: Performs a scroll of the mouse scroll wheel.\n"
                        "* `hscroll`: Performs a horizontal scroll (mapped to regular scroll).\n"
                        "* `wait`: Wait specified seconds for the change to happen.\n"
                        "* `terminate`: Terminate the current task and report its completion status.\n"
                        "* `answer`: Answer a question."
                    ),
                    "enum": [
                        "point",
                        "bbox",
                        "key",
                        "type",
                        "mouse_move",
                        "left_click",
                        "left_click_drag",
                        "right_click",
                        "middle_click",
                        "double_click",
                        "scroll",
                        "wait",
                        "terminate",
                    ],
                    "type": "string",
                },
                "keys": {
                    "description": "Required only by `action=key`.",
                    "type": "array",
                },
                "text": {
                    "description": "Required only by `action=type`.",
                    "type": "string",
                },
                "coordinate": {
                    "description": "The [x, y] coordinates for mouse actions/point, or [x_min, y_min, x_max, y_max] for bbox.",
                    "type": "array",
                },
                "pixels": {"description": "The amount of scrolling.", "type": "number"},
                "time": {"description": "The seconds to wait.", "type": "number"},
                "status": {
                    "description": "The status of the task.",
                    "type": "string",
                    "enum": ["success", "failure"],
                },
            },
            "required": ["action"],
            "type": "object",
        },
        "args_format": "Format the arguments as a JSON object.",
    },
}
# Key order preserved so the json.dumps-rendered prompt stays byte-identical.
DESKTOP_GROUNDING_SYSTEM_PROMPT_V0 = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(
    tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V0)
)
211
+
212
+
213
+
214
+ # v1
215
+
216
# v1 trajectory tool schema: same `desktop_use` tool as v0, but every action
# is documented with an explicit call signature, `triple_click`/`hscroll` are
# first-class enum members, and the `answer` action has been removed.
DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1 = {
    "type": "function",
    "function": {
        "name_for_human": "desktop_use",
        "name": "desktop_use",
        "description": DESKTOP_TRAJECTORY_DESCRIPTION,
        "parameters": {
            "properties": {
                "action": {
                    "description": (
                        "* `key(keys=[\"key1\", \"key2\"])`: Press the keys in order (key down), then release them in reverse order (key up).\n"
                        + "* `type(text=\"string\")`: Type the given text.\n"
                        + "* `mouse_move(coordinate=[x, y])`: Move the cursor to (x, y).\n"
                        + "* `left_click(coordinate=[x, y][, keys=[\"key\"]])`: Left click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
                        + "* `left_click_drag(coordinate=[x, y][, keys=[\"key\"]])`: Drag to (x, y) with the left mouse button. If `keys` is provided, hold those keys while dragging.\n"
                        + "* `right_click(coordinate=[x, y][, keys=[\"key\"]])`: Right click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
                        + "* `middle_click(coordinate=[x, y][, keys=[\"key\"]])`: Middle click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
                        + "* `double_click(coordinate=[x, y][, keys=[\"key\"]])`: Double left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
                        # Typo fix: previously said "while double-clicking" here.
                        + "* `triple_click(coordinate=[x, y][, keys=[\"key\"]])`: Triple left click at (x, y). If `keys` is provided, hold those keys while triple-clicking.\n"
                        + "* `scroll(pixels=number[, keys=[\"key\"]])`: Scroll vertically. `pixels > 0` scrolls up, `pixels < 0` scrolls down. If `keys` is provided, hold those keys while scrolling.\n"
                        + "* `hscroll(pixels=number[, keys=[\"key\"]])`: Scroll horizontally. `pixels > 0` scrolls right, `pixels < 0` scrolls left. If `keys` is provided, hold those keys while scrolling.\n"
                        + "* `wait(time=seconds)`: Wait for the given number of seconds.\n"
                        + "* `terminate(status=\"success\"|\"failure\")`: End the task and report the status.\n"
                    ),
                    "enum": [
                        "key",
                        "type",
                        "mouse_move",
                        "left_click",
                        "left_click_drag",
                        "right_click",
                        "middle_click",
                        "double_click",
                        "triple_click",
                        "scroll",
                        "hscroll",
                        "wait",
                        "terminate",
                    ],
                    "type": "string",
                },
                "keys": {
                    "description": (
                        "Keys to press/hold.\n"
                        "Required only for action: `key` (press in order, release in reverse).\n"
                        "Optional only for actions: `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`, `scroll`, `hscroll` (hold these keys while performing the action).\n"
                    ),
                    "type": "array",
                },
                "text": {
                    # Consistency fix: v1 has no `answer` action (it is absent
                    # from both the enum and the action list above), so the
                    # stale mention of `answer` was removed from this description.
                    "description": "The text content to type. Required only for action: `type`.",
                    "type": "string",
                },
                "coordinate": {
                    "description": "The (x, y) pixel coordinate on the screen. Required only for actions: `mouse_move`, `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`.",
                    "type": "array",
                },
                "pixels": {
                    "description": "The amount of scrolling to perform. Required only for actions: `scroll`, `hscroll`.",
                    "type": "number",
                },
                "time": {
                    "description": "The duration to wait in seconds. Required only for action: `wait`.",
                    "type": "number",
                },
                "status": {
                    "description": "The completion status of the task. Required only for action: `terminate`.",
                    "type": "string",
                    "enum": ["success", "failure"],
                },
            },
            "required": ["action"],
            "type": "object",
        },
        "args_format": "Format the arguments as a JSON object.",
    },
}

DESKTOP_TRAJECTORY_SYSTEM_PROMPT_V1 = TRAJECTORY_SYSTEM_PROMPT_TEMPLATE.format(
    tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1)
)
295
+
296
# v1: grounding.
# Reuse the v1 trajectory tool schema, swapping only the human-readable
# description so the same action set is framed as a grounding task.
DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1_ACTIONS = copy.deepcopy(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1)
DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1_ACTIONS["function"]["description"] = DESKTOP_GROUNDING_DESCRIPTION_ACTIONS

# Bug fix: this previously serialized DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1,
# silently discarding the description override applied to the _ACTIONS copy
# above (same defect as the v0 variant).
DESKTOP_GROUNDING_SYSTEM_PROMPT_V1_ACTIONS = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(
    tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1_ACTIONS)
)
301
+
302
# v1 grounding tool `point`: identical schema to the v0 variant; only the
# description constant differs by version.
DESKTOP_GROUNDING_TOOLS_DEFINITION_V1_POINT = {
    "type": "function",
    "function": {
        "name_for_human": "point",
        "name": "point",
        "description": DESKTOP_GROUNDING_DESCRIPTION_POINT,
        "parameters": {
            "properties": {
                "coordinate": {
                    "description": (
                        "A 2-element list [x, y] representing the screen coordinates"
                        " for the center of the described object."
                    ),
                    "type": "array",
                },
            },
            "required": ["coordinate"],
            "type": "object",
        },
        "args_format": "Format the arguments as a JSON object.",
    },
}
# Key order preserved so the json.dumps-rendered prompt stays byte-identical.
DESKTOP_GROUNDING_SYSTEM_PROMPT_V1_POINT = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(
    tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V1_POINT)
)
322
+
323
# v1 grounding tool `bbox`: identical schema to the v0 variant; only the
# description constant differs by version.
DESKTOP_GROUNDING_TOOLS_DEFINITION_V1_BBOX = {
    "type": "function",
    "function": {
        "name_for_human": "bbox",
        "name": "bbox",
        "description": DESKTOP_GROUNDING_DESCRIPTION_BBOX,
        "parameters": {
            "properties": {
                "coordinate": {
                    "description": (
                        "A 4-element list [x_min, y_min, x_max, y_max] defining the bounding box,"
                        " where (x_min, y_min) is the top-left corner and (x_max, y_max) is the"
                        " bottom-right corner."
                    ),
                    "type": "array",
                },
            },
            "required": ["coordinate"],
            "type": "object",
        },
        "args_format": "Format the arguments as a JSON object.",
    },
}
# Key order preserved so the json.dumps-rendered prompt stays byte-identical.
DESKTOP_GROUNDING_SYSTEM_PROMPT_V1_BBOX = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(
    tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V1_BBOX)
)
343
+
344
+
345
# v1 combined grounding tool: `point`/`bbox` localization plus the v1 desktop
# action vocabulary, exposed as a single `desktop_grounding` function.
DESKTOP_GROUNDING_TOOLS_DEFINITION_V1 = {
    "type": "function",
    "function": {
        "name_for_human": "desktop_grounding",
        "name": "desktop_grounding",
        "description": DESKTOP_GROUNDING_DESCRIPTION,
        "parameters": {
            "properties": {
                "action": {
                    "description": (
                        "* `point`: Return the [x, y] center coordinates of the object described.\n"
                        + "* `bbox`: Return the [x_min, y_min, x_max, y_max] bounding box coordinates of the object described.\n"
                        + "* `key(keys=[\"key1\", \"key2\"])`: Press the keys in order (key down), then release them in reverse order (key up).\n"
                        + "* `type(text=\"string\")`: Type the given text.\n"
                        + "* `mouse_move(coordinate=[x, y])`: Move the cursor to (x, y).\n"
                        + "* `left_click(coordinate=[x, y][, keys=[\"key\"]])`: Left click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
                        + "* `left_click_drag(coordinate=[x, y][, keys=[\"key\"]])`: Drag to (x, y) with the left mouse button. If `keys` is provided, hold those keys while dragging.\n"
                        + "* `right_click(coordinate=[x, y][, keys=[\"key\"]])`: Right click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
                        + "* `middle_click(coordinate=[x, y][, keys=[\"key\"]])`: Middle click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
                        + "* `double_click(coordinate=[x, y][, keys=[\"key\"]])`: Double left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
                        # Typo fix: previously said "while double-clicking" here.
                        + "* `triple_click(coordinate=[x, y][, keys=[\"key\"]])`: Triple left click at (x, y). If `keys` is provided, hold those keys while triple-clicking.\n"
                        + "* `scroll(pixels=number[, keys=[\"key\"]])`: Scroll vertically. `pixels > 0` scrolls up, `pixels < 0` scrolls down. If `keys` is provided, hold those keys while scrolling.\n"
                        + "* `hscroll(pixels=number[, keys=[\"key\"]])`: Scroll horizontally. `pixels > 0` scrolls right, `pixels < 0` scrolls left. If `keys` is provided, hold those keys while scrolling.\n"
                        + "* `wait(time=seconds)`: Wait for the given number of seconds.\n"
                        + "* `terminate(status=\"success\"|\"failure\")`: End the task and report the status.\n"
                        + "* `answer(text=\"answer\")`: Return a final answer in text."
                    ),
                    # Consistency fix: the enum previously omitted `triple_click`,
                    # `hscroll` and `answer` even though all three are documented
                    # above, `triple_click` is listed under `coordinate`, `answer`
                    # is listed under `text`, and the v1 trajectory enum already
                    # includes `triple_click`/`hscroll`.
                    "enum": [
                        "point",
                        "bbox",
                        "key",
                        "type",
                        "mouse_move",
                        "left_click",
                        "left_click_drag",
                        "right_click",
                        "middle_click",
                        "double_click",
                        "triple_click",
                        "scroll",
                        "hscroll",
                        "wait",
                        "terminate",
                        "answer",
                    ],
                    "type": "string",
                },
                "keys": {
                    "description": (
                        "Keys to press/hold.\n"
                        "Required only for action: `key` (press in order, release in reverse).\n"
                        "Optional only for actions: `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`, `scroll`, `hscroll` (hold these keys while performing the action).\n"
                    ),
                    "type": "array",
                },
                "text": {
                    "description": "The text content to type or the answer to return. Required only for actions: `type`, `answer`.",
                    "type": "string",
                },
                "coordinate": {
                    "description": "The [x, y] coordinates for mouse actions/point, or [x_min, y_min, x_max, y_max] for bbox. Required only for actions: `point`, `bbox`, `mouse_move`, `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`.",
                    "type": "array",
                },
                "pixels": {
                    "description": "The amount of scrolling to perform. Required only for actions: `scroll`, `hscroll`.",
                    "type": "number",
                },
                "time": {
                    "description": "The duration to wait in seconds. Required only for action: `wait`.",
                    "type": "number",
                },
                "status": {
                    "description": "The completion status of the task. Required only for action: `terminate`.",
                    "type": "string",
                    "enum": ["success", "failure"],
                },
            },
            "required": ["action"],
            "type": "object",
        },
        "args_format": "Format the arguments as a JSON object.",
    },
}
DESKTOP_GROUNDING_SYSTEM_PROMPT_V1 = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(
    tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V1)
)
426
+
427
+
428
+
429
# This module is import-only: it defines prompt/tool-schema constants and has
# no standalone CLI behavior.
if __name__ == "__main__":
    pass
431
+
cua_lite/data/interfaces/qwen3_vl/prompts/common.py CHANGED
@@ -1,38 +1,83 @@
1
- SYSTEM_PROMPT = (
2
- """# Tools
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  You may call one or more functions to assist with the user query.
5
 
6
  You are provided with function signatures within <tools></tools> XML tags:
7
  <tools>
8
- """
9
- + "{tools_definition}"
10
- + """
11
  </tools>
12
 
13
  For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
14
  <tool_call>
15
- {"name": <function-name>, "arguments": <args-json-object>}
16
  </tool_call>
17
 
18
  # Response format
19
 
20
  Response format for every step:
21
  1) Action: a short imperative describing what to do in the UI.
22
- 2) A single <tool_call>...</tool_call> block containing only the JSON: {"name": <function-name>, "arguments": <args-json-object>}.
23
 
24
  Rules:
25
  - Output exactly in the order: Action, <tool_call>.
26
  - Be brief: one sentence for Action.
27
  - Do not output anything else outside those parts.
28
  - If finishing, use action=terminate in the tool call."""
29
- )
30
 
31
 
32
- INSTRUCTION_PROMPT = """
 
 
33
  Please generate the next move according to the UI screenshot, instruction and previous actions.
34
 
35
  Instruction: {instruction}
36
 
37
  Previous actions:
38
  {previous_actions}"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TRAJECTORY_SYSTEM_PROMPT = (
2
+ # """# Tools
3
+
4
+ # You may call one or more functions to assist with the user query.
5
+
6
+ # You are provided with function signatures within <tools></tools> XML tags:
7
+ # <tools>
8
+ # """
9
+ # + "{tools_definition}"
10
+ # + """
11
+ # </tools>
12
+
13
+ # For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
14
+ # <tool_call>
15
+ # {"name": <function-name>, "arguments": <args-json-object>}
16
+ # </tool_call>
17
+
18
+ # # Response format
19
+
20
+ # Response format for every step:
21
+ # 1) Action: a short imperative describing what to do in the UI.
22
+ # 2) A single <tool_call>...</tool_call> block containing only the JSON: {"name": <function-name>, "arguments": <args-json-object>}.
23
+
24
+ # Rules:
25
+ # - Output exactly in the order: Action, <tool_call>.
26
+ # - Be brief: one sentence for Action.
27
+ # - Do not output anything else outside those parts.
28
+ # - If finishing, use action=terminate in the tool call."""
29
+ # )
30
+
31
# System-prompt skeleton for trajectory (multi-step action) data.
# `str.format` fills {tools_definition} with a JSON tool schema; the literal
# braces in the JSON examples are escaped as {{ }} so .format leaves them
# intact in the rendered prompt.
TRAJECTORY_SYSTEM_PROMPT_TEMPLATE = """# Tools

You may call one or more functions to assist with the user query.

You are provided with function signatures within <tools></tools> XML tags:
<tools>
{tools_definition}
</tools>

For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{{"name": <function-name>, "arguments": <args-json-object>}}
</tool_call>

# Response format

Response format for every step:
1) Action: a short imperative describing what to do in the UI.
2) A single <tool_call>...</tool_call> block containing only the JSON: {{"name": <function-name>, "arguments": <args-json-object>}}.

Rules:
- Output exactly in the order: Action, <tool_call>.
- Be brief: one sentence for Action.
- Do not output anything else outside those parts.
- If finishing, use action=terminate in the tool call."""
 
56
 
57
 
58
+
59
+
60
# Per-step user-turn template: pairs the task instruction with the running
# action history. Note the intentional leading newline before "Please".
TRAJECTORY_INSTRUCTION_PROMPT_TEMPLATE = """
Please generate the next move according to the UI screenshot, instruction and previous actions.

Instruction: {instruction}

Previous actions:
{previous_actions}"""
67
+
68
+
69
+
70
+
71
# System-prompt skeleton for single-shot grounding data: the tools/tool_call
# preamble only, without the trajectory "# Response format" section.
# {tools_definition} is filled with a JSON tool schema; {{ }} escapes keep the
# literal braces of the JSON example intact through .format.
GROUNDING_SYSTEM_PROMPT_TEMPLATE = """# Tools

You may call one or more functions to assist with the user query.

You are provided with function signatures within <tools></tools> XML tags:
<tools>
{tools_definition}
</tools>

For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{{"name": <function-name>, "arguments": <args-json-object>}}
</tool_call>"""
cua_lite/data/interfaces/qwen3_vl/prompts/desktop.py CHANGED
@@ -1,45 +1,47 @@
 
1
  import json
2
 
3
- from .common import SYSTEM_PROMPT
 
4
 
5
-
6
- DESKTOP_DESCRIPTION_PROMPT = """Use a mouse and keyboard to interact with a computer, and take screenshots.
7
  * This is an interface to a desktop GUI. You do not have access to a terminal or applications menu. You must click on desktop icons to start applications.
8
  * Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions. For example, if you click on Firefox and a window does not open, try waiting and then taking another screenshot.
9
  * The screen resolution is 1000x1000.
10
- * Whenever you intend to move the cursor to click on an element such as an icon, consult a screenshot first to determine the elements coordinates before moving the cursor.
11
  * If you tried clicking on a program or link but it failed to load even after waiting, adjust your cursor position so that the tip of the cursor visually falls on the element you want to click.
12
- * Make sure to click any buttons, links, icons, or other elements with the cursor tip in the center of the element. Do not click on edges unless explicitly instructed.\
13
- """
14
 
15
- # v0 (https://github.com/xlang-ai/OSWorld/blob/main/mm_agents/qwen3vl_agent.py)
16
- DESKTOP_ACTION_DESCRIPTION_PROMPT_V0 = """\
17
- * `key`: Performs key down presses on the arguments passed in order, then performs key releases in reverse order.
18
- * `type`: Type a string of text on the keyboard.
19
- * `mouse_move`: Move the cursor to a specified (x, y) pixel coordinate on the screen.
20
- * `left_click`: Click the left mouse button at a specified (x, y) pixel coordinate on the screen.
21
- * `left_click_drag`: Click and drag the cursor to a specified (x, y) pixel coordinate on the screen.
22
- * `right_click`: Click the right mouse button at a specified (x, y) pixel coordinate on the screen.
23
- * `middle_click`: Click the middle mouse button at a specified (x, y) pixel coordinate on the screen.
24
- * `double_click`: Double-click the left mouse button at a specified (x, y) pixel coordinate on the screen.
25
- * `triple_click`: Triple-click the left mouse button at a specified (x, y) pixel coordinate on the screen (simulated as double-click since it's the closest action).
26
- * `scroll`: Performs a scroll of the mouse scroll wheel.
27
- * `hscroll`: Performs a horizontal scroll (mapped to regular scroll).
28
- * `wait`: Wait specified seconds for the change to happen.
29
- * `terminate`: Terminate the current task and report its completion status.
30
- * `answer`: Answer a question.\
31
- """
32
 
33
- DESKTOP_TOOLS_DEFINITION_V0 = {
34
  "type": "function",
35
  "function": {
36
  "name_for_human": "desktop_use",
37
  "name": "desktop_use",
38
- "description": DESKTOP_DESCRIPTION_PROMPT,
39
  "parameters": {
40
  "properties": {
41
  "action": {
42
- "description": DESKTOP_ACTION_DESCRIPTION_PROMPT_V0,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  "enum": [
44
  "key",
45
  "type",
@@ -49,28 +51,40 @@ DESKTOP_TOOLS_DEFINITION_V0 = {
49
  "right_click",
50
  "middle_click",
51
  "double_click",
 
52
  "scroll",
 
53
  "wait",
54
  "terminate",
55
  ],
56
  "type": "string",
57
  },
58
  "keys": {
59
- "description": "Required only by `action=key`.",
 
 
 
 
60
  "type": "array",
61
  },
62
  "text": {
63
- "description": "Required only by `action=type`.",
64
  "type": "string",
65
  },
66
  "coordinate": {
67
- "description": "The x,y coordinates for mouse actions.",
68
  "type": "array",
69
  },
70
- "pixels": {"description": "The amount of scrolling.", "type": "number"},
71
- "time": {"description": "The seconds to wait.", "type": "number"},
 
 
 
 
 
 
72
  "status": {
73
- "description": "The status of the task.",
74
  "type": "string",
75
  "enum": ["success", "failure"],
76
  },
@@ -81,39 +95,87 @@ DESKTOP_TOOLS_DEFINITION_V0 = {
81
  "args_format": "Format the arguments as a JSON object.",
82
  },
83
  }
 
 
 
 
 
 
 
84
 
85
- DESKTOP_SYSTEM_PROMPT_V0 = SYSTEM_PROMPT.format(tools_definition=json.dumps(DESKTOP_TOOLS_DEFINITION_V0))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
88
- # v1
89
- DESKTOP_ACTION_DESCRIPTION_PROMPT_V1 = """\
90
- * `key(keys=["key1", "key2"])`: Press the keys in order (key down), then release them in reverse order (key up).
91
- * `type(text="string")`: Type the given text.
92
- * `mouse_move(coordinate=[x, y])`: Move the cursor to (x, y).
93
- * `left_click(coordinate=[x, y][, keys=["key"]])`: Left click at (x, y). If `keys` is provided, hold those keys while clicking.
94
- * `left_click_drag(coordinate=[x, y][, keys=["key"]])`: Drag to (x, y) with the left mouse button. If `keys` is provided, hold those keys while dragging.
95
- * `right_click(coordinate=[x, y][, keys=["key"]])`: Right click at (x, y). If `keys` is provided, hold those keys while clicking.
96
- * `middle_click(coordinate=[x, y][, keys=["key"]])`: Middle click at (x, y). If `keys` is provided, hold those keys while clicking.
97
- * `double_click(coordinate=[x, y][, keys=["key"]])`: Double left click at (x, y). If `keys` is provided, hold those keys while double-clicking.
98
- * `triple_click(coordinate=[x, y][, keys=["key"]])`: Triple left click at (x, y). If `keys` is provided, hold those keys while double-clicking.
99
- * `scroll(pixels=number[, keys=["key"]])`: Scroll vertically. `pixels > 0` scrolls up, `pixels < 0` scrolls down. If `keys` is provided, hold those keys while scrolling.
100
- * `hscroll(pixels=number[, keys=["key"]])`: Scroll horizontally. `pixels > 0` scrolls right, `pixels < 0` scrolls left. If `keys` is provided, hold those keys while scrolling.
101
- * `wait(time=seconds)`: Wait for the given number of seconds.
102
- * `terminate(status="success"|"failure")`: End the task and report the status.
103
- * `answer(text="answer")`: Return a final answer in text.\
104
- """
105
 
106
- DESKTOP_TOOLS_DEFINITION_V1 = {
107
  "type": "function",
108
  "function": {
109
- "name_for_human": "desktop_use",
110
- "name": "desktop_use",
111
- "description": DESKTOP_DESCRIPTION_PROMPT,
112
  "parameters": {
113
  "properties": {
114
  "action": {
115
- "description": DESKTOP_ACTION_DESCRIPTION_PROMPT_V1,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  "enum": [
 
 
117
  "key",
118
  "type",
119
  "mouse_move",
@@ -127,7 +189,6 @@ DESKTOP_TOOLS_DEFINITION_V1 = {
127
  "hscroll",
128
  "wait",
129
  "terminate",
130
- "answer"
131
  ],
132
  "type": "string",
133
  },
@@ -135,7 +196,7 @@ DESKTOP_TOOLS_DEFINITION_V1 = {
135
  "description": (
136
  "Keys to press/hold.\n"
137
  "Required only for action: `key` (press in order, release in reverse).\n"
138
- "Optional only for actions: `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `double_click`, `scroll`, `hscroll` (hold these keys while performing the action).\n"
139
  ),
140
  "type": "array",
141
  },
@@ -144,11 +205,11 @@ DESKTOP_TOOLS_DEFINITION_V1 = {
144
  "type": "string",
145
  },
146
  "coordinate": {
147
- "description": "The (x, y) pixel coordinate on the screen. Required only for actions: `mouse_move`, `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`.",
148
  "type": "array",
149
  },
150
  "pixels": {
151
- "description": "The amount of scrolling to perform. Required only fors actions: `scroll`, `hscroll`.",
152
  "type": "number"
153
  },
154
  "time": {
@@ -167,5 +228,9 @@ DESKTOP_TOOLS_DEFINITION_V1 = {
167
  "args_format": "Format the arguments as a JSON object.",
168
  },
169
  }
 
 
 
 
 
170
 
171
- DESKTOP_SYSTEM_PROMPT_V1 = SYSTEM_PROMPT.format(tools_definition=json.dumps(DESKTOP_TOOLS_DEFINITION_V1))
 
1
+ import copy
2
  import json
3
 
4
+ # from .common import TRAJECTORY_SYSTEM_PROMPT_TEMPLATE, GROUNDING_SYSTEM_PROMPT_TEMPLATE
5
+ from cua_lite.data.interfaces.qwen3_vl.prompts.common import TRAJECTORY_SYSTEM_PROMPT_TEMPLATE, GROUNDING_SYSTEM_PROMPT_TEMPLATE
6
 
7
+ DESKTOP_TRAJECTORY_DESCRIPTION = """Use a mouse and keyboard to interact with a computer, and take screenshots.
 
8
  * This is an interface to a desktop GUI. You do not have access to a terminal or applications menu. You must click on desktop icons to start applications.
9
  * Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions. For example, if you click on Firefox and a window does not open, try waiting and then taking another screenshot.
10
  * The screen resolution is 1000x1000.
11
+ * Whenever you intend to move the cursor to click on an element such as an icon, consult a screenshot first to determine the element's coordinates before moving the cursor.
12
  * If you tried clicking on a program or link but it failed to load even after waiting, adjust your cursor position so that the tip of the cursor visually falls on the element you want to click.
13
+ * Make sure to click any buttons, links, icons, or other elements with the cursor tip in the center of the element. Do not click on edges unless explicitly instructed."""
 
14
 
15
+ # mention point and bbox
16
+ DESKTOP_GROUNDING_DESCRIPTION = """Given a computer screenshot, map the natural language instruction to an action or map the natural language reference to a point or a bounding box in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
17
+ # DESKTOP_GROUNDING_DESCRIPTION_ACTIONS = """Given a computer screenshot, map the natural language action to a point in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
18
+ # DESKTOP_GROUNDING_DESCRIPTION_POINT = """Given a computer screenshot, map the natural language reference to a point in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
19
+ # DESKTOP_GROUNDING_DESCRIPTION_BBOX = """Given a computer screenshot, map the natural language reference to a bounding box in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION = {
22
  "type": "function",
23
  "function": {
24
  "name_for_human": "desktop_use",
25
  "name": "desktop_use",
26
+ "description": DESKTOP_TRAJECTORY_DESCRIPTION,
27
  "parameters": {
28
  "properties": {
29
  "action": {
30
+ "description": (
31
+ "* `key(keys=[\"key1\", \"key2\"])`: Press the keys in order (key down), then release them in reverse order (key up).\n"
32
+ + "* `type(text=\"string\")`: Type the given text.\n"
33
+ + "* `mouse_move(coordinate=[x, y])`: Move the cursor to (x, y).\n"
34
+ + "* `left_click(coordinate=[x, y][, keys=[\"key\"]])`: Left click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
35
+ + "* `left_click_drag(coordinate=[x, y][, keys=[\"key\"]])`: Drag to (x, y) with the left mouse button. If `keys` is provided, hold those keys while dragging.\n"
36
+ + "* `right_click(coordinate=[x, y][, keys=[\"key\"]])`: Right click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
37
+ + "* `middle_click(coordinate=[x, y][, keys=[\"key\"]])`: Middle click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
38
+ + "* `double_click(coordinate=[x, y][, keys=[\"key\"]])`: Double left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
39
+ + "* `triple_click(coordinate=[x, y][, keys=[\"key\"]])`: Triple left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
40
+ + "* `scroll(pixels=number[, keys=[\"key\"]])`: Scroll vertically. `pixels > 0` scrolls up, `pixels < 0` scrolls down. If `keys` is provided, hold those keys while scrolling.\n"
41
+ + "* `hscroll(pixels=number[, keys=[\"key\"]])`: Scroll horizontally. `pixels > 0` scrolls right, `pixels < 0` scrolls left. If `keys` is provided, hold those keys while scrolling.\n"
42
+ + "* `wait(time=seconds)`: Wait for the given number of seconds.\n"
43
+ + "* `terminate(status=\"success\"|\"failure\")`: End the task and report the status.\n"
44
+ ),
45
  "enum": [
46
  "key",
47
  "type",
 
51
  "right_click",
52
  "middle_click",
53
  "double_click",
54
+ "triple_click",
55
  "scroll",
56
+ "hscroll",
57
  "wait",
58
  "terminate",
59
  ],
60
  "type": "string",
61
  },
62
  "keys": {
63
+ "description": (
64
+ "Keys to press/hold.\n"
65
+ "Required only for action: `key` (press in order, release in reverse).\n"
66
+ "Optional only for actions: `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`, `scroll`, `hscroll` (hold these keys while performing the action).\n"
67
+ ),
68
  "type": "array",
69
  },
70
  "text": {
71
+ "description": "The text content to type or the answer to return. Required only for actions: `type`, `answer`.",
72
  "type": "string",
73
  },
74
  "coordinate": {
75
+ "description": "The (x, y) pixel coordinate on the screen. Required only for actions: `mouse_move`, `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`.",
76
  "type": "array",
77
  },
78
+ "pixels": {
79
+ "description": "The amount of scrolling to perform. Required only for actions: `scroll`, `hscroll`.",
80
+ "type": "number"
81
+ },
82
+ "time": {
83
+ "description": "The duration to wait in seconds. Required only for action: `wait`.",
84
+ "type": "number"
85
+ },
86
  "status": {
87
+ "description": "The completion status of the task. Required only for action: `terminate`.",
88
  "type": "string",
89
  "enum": ["success", "failure"],
90
  },
 
95
  "args_format": "Format the arguments as a JSON object.",
96
  },
97
  }
98
+ DESKTOP_TRAJECTORY_SYSTEM_PROMPT = TRAJECTORY_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION ))
99
+
100
+ # grounding
101
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_ACTIONS = copy.deepcopy(DESKTOP_TRAJECTORY_TOOLS_DEFINITION)
102
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_ACTIONS["function"]["description"] = DESKTOP_GROUNDING_DESCRIPTION_ACTIONS
103
+
104
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_ACTIONS = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION))
105
 
106
+ DESKTOP_GROUNDING_TOOLS_DEFINITION_POINT = {
107
+ "type": "function",
108
+ "function": {
109
+ "name_for_human": "point",
110
+ "name": "point",
111
+ "description": DESKTOP_GROUNDING_DESCRIPTION_POINT,
112
+ "parameters": {
113
+ "properties": {
114
+ "coordinate": {
115
+ "description": "A 2-element list [x, y] representing the screen coordinates for the center of the described object.",
116
+ "type": "array",
117
+ },
118
+ },
119
+ "required": ["coordinate"],
120
+ "type": "object",
121
+ },
122
+ "args_format": "Format the arguments as a JSON object.",
123
+ },
124
+ }
125
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_POINT = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_POINT))
126
 
127
+ DESKTOP_GROUNDING_TOOLS_DEFINITION_BBOX = {
128
+ "type": "function",
129
+ "function": {
130
+ "name_for_human": "bbox",
131
+ "name": "bbox",
132
+ "description": DESKTOP_GROUNDING_DESCRIPTION_BBOX,
133
+ "parameters": {
134
+ "properties": {
135
+ "coordinate": {
136
+ "description": "A 4-element list [x_min, y_min, x_max, y_max] defining the bounding box, where (x_min, y_min) is the top-left corner and (x_max, y_max) is the bottom-right corner.",
137
+ "type": "array",
138
+ },
139
+ },
140
+ "required": ["coordinate"],
141
+ "type": "object",
142
+ },
143
+ "args_format": "Format the arguments as a JSON object.",
144
+ },
145
+ }
146
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_BBOX = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_BBOX))
147
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
+ DESKTOP_GROUNDING_TOOLS_DEFINITION = {
150
  "type": "function",
151
  "function": {
152
+ "name_for_human": "desktop_grounding",
153
+ "name": "desktop_grounding",
154
+ "description": DESKTOP_GROUNDING_DESCRIPTION,
155
  "parameters": {
156
  "properties": {
157
  "action": {
158
+ "description": (
159
+ "* `point`: Return the [x, y] center coordinates of the object described.\n"
160
+ + "* `bbox`: Return the [x_min, y_min, x_max, y_max] bounding box coordinates of the object described.\n"
161
+ + "* `key(keys=[\"key1\", \"key2\"])`: Press the keys in order (key down), then release them in reverse order (key up).\n"
162
+ + "* `type(text=\"string\")`: Type the given text.\n"
163
+ + "* `mouse_move(coordinate=[x, y])`: Move the cursor to (x, y).\n"
164
+ + "* `left_click(coordinate=[x, y][, keys=[\"key\"]])`: Left click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
165
+ + "* `left_click_drag(coordinate=[x, y][, keys=[\"key\"]])`: Drag to (x, y) with the left mouse button. If `keys` is provided, hold those keys while dragging.\n"
166
+ + "* `right_click(coordinate=[x, y][, keys=[\"key\"]])`: Right click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
167
+ + "* `middle_click(coordinate=[x, y][, keys=[\"key\"]])`: Middle click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
168
+ + "* `double_click(coordinate=[x, y][, keys=[\"key\"]])`: Double left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
169
+ + "* `triple_click(coordinate=[x, y][, keys=[\"key\"]])`: Triple left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
170
+ + "* `scroll(pixels=number[, keys=[\"key\"]])`: Scroll vertically. `pixels > 0` scrolls up, `pixels < 0` scrolls down. If `keys` is provided, hold those keys while scrolling.\n"
171
+ + "* `hscroll(pixels=number[, keys=[\"key\"]])`: Scroll horizontally. `pixels > 0` scrolls right, `pixels < 0` scrolls left. If `keys` is provided, hold those keys while scrolling.\n"
172
+ + "* `wait(time=seconds)`: Wait for the given number of seconds.\n"
173
+ + "* `terminate(status=\"success\"|\"failure\")`: End the task and report the status.\n"
174
+ + "* `answer(text=\"answer\")`: Return a final answer in text."
175
+ ),
176
  "enum": [
177
+ "point",
178
+ "bbox",
179
  "key",
180
  "type",
181
  "mouse_move",
 
189
  "hscroll",
190
  "wait",
191
  "terminate",
 
192
  ],
193
  "type": "string",
194
  },
 
196
  "description": (
197
  "Keys to press/hold.\n"
198
  "Required only for action: `key` (press in order, release in reverse).\n"
199
+ "Optional only for actions: `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`, `scroll`, `hscroll` (hold these keys while performing the action).\n"
200
  ),
201
  "type": "array",
202
  },
 
205
  "type": "string",
206
  },
207
  "coordinate": {
208
+ "description": "The [x, y] coordinates for mouse actions/point, or [x_min, y_min, x_max, y_max] for bbox. Required only for actions: `point`, `bbox`, `mouse_move`, `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`.",
209
  "type": "array",
210
  },
211
  "pixels": {
212
+ "description": "The amount of scrolling to perform. Required only for actions: `scroll`, `hscroll`.",
213
  "type": "number"
214
  },
215
  "time": {
 
228
  "args_format": "Format the arguments as a JSON object.",
229
  },
230
  }
231
+ DESKTOP_GROUNDING_SYSTEM_PROMPT = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION))
232
+
233
+
234
+ if __name__ == "__main__":
235
+ pass
236
 
 
cua_lite/data/interfaces/qwen3_vl/prompts/version_0/desktop_v0.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import json
3
+
4
+ # from .common import TRAJECTORY_SYSTEM_PROMPT_TEMPLATE, GROUNDING_SYSTEM_PROMPT_TEMPLATE
5
+ from cua_lite.data.interfaces.qwen3_vl.prompts.common import TRAJECTORY_SYSTEM_PROMPT_TEMPLATE, GROUNDING_SYSTEM_PROMPT_TEMPLATE
6
+
7
+ DESKTOP_TRAJECTORY_DESCRIPTION = """Use a mouse and keyboard to interact with a computer, and take screenshots.
8
+ * This is an interface to a desktop GUI. You do not have access to a terminal or applications menu. You must click on desktop icons to start applications.
9
+ * Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions. For example, if you click on Firefox and a window does not open, try waiting and then taking another screenshot.
10
+ * The screen resolution is 1000x1000.
11
+ * Whenever you intend to move the cursor to click on an element such as an icon, consult a screenshot first to determine the element's coordinates before moving the cursor.
12
+ * If you tried clicking on a program or link but it failed to load even after waiting, adjust your cursor position so that the tip of the cursor visually falls on the element you want to click.
13
+ * Make sure to click any buttons, links, icons, or other elements with the cursor tip in the center of the element. Do not click on edges unless explicitly instructed."""
14
+
15
+ # mention point and bbox
16
+ DESKTOP_GROUNDING_DESCRIPTION = """Given a computer screenshot, map the natural language instruction to an action or map the natural language reference to a point or a bounding box in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
17
+ DESKTOP_GROUNDING_DESCRIPTION_ACTIONS = """Given a computer screenshot, map the natural language action to a point in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
18
+ DESKTOP_GROUNDING_DESCRIPTION_POINT = """Given a computer screenshot, map the natural language reference to a point in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
19
+ DESKTOP_GROUNDING_DESCRIPTION_BBOX = """Given a computer screenshot, map the natural language reference to a bounding box in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
20
+
21
+ # v0: trajectory (https://github.com/xlang-ai/OSWorld/blob/main/mm_agents/qwen3vl_agent.py)
22
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0 = {
23
+ "type": "function",
24
+ "function": {
25
+ "name_for_human": "desktop_use",
26
+ "name": "desktop_use",
27
+ "description": DESKTOP_TRAJECTORY_DESCRIPTION,
28
+ "parameters": {
29
+ "properties": {
30
+ "action": {
31
+ "description": (
32
+ "* `key`: Performs key down presses on the arguments passed in order, then performs key releases in reverse order.\n"
33
+ + "* `type`: Type a string of text on the keyboard.\n"
34
+ + "* `mouse_move`: Move the cursor to a specified (x, y) pixel coordinate on the screen.\n"
35
+ + "* `left_click`: Click the left mouse button at a specified (x, y) pixel coordinate on the screen.\n"
36
+ + "* `left_click_drag`: Click and drag the cursor to a specified (x, y) pixel coordinate on the screen.\n"
37
+ + "* `right_click`: Click the right mouse button at a specified (x, y) pixel coordinate on the screen.\n"
38
+ + "* `middle_click`: Click the middle mouse button at a specified (x, y) pixel coordinate on the screen.\n"
39
+ + "* `double_click`: Double-click the left mouse button at a specified (x, y) pixel coordinate on the screen.\n"
40
+ + "* `triple_click`: Triple-click the left mouse button at a specified (x, y) pixel coordinate on the screen (simulated as double-click since it's the closest action).\n"
41
+ + "* `scroll`: Performs a scroll of the mouse scroll wheel.\n"
42
+ + "* `hscroll`: Performs a horizontal scroll (mapped to regular scroll).\n"
43
+ + "* `wait`: Wait specified seconds for the change to happen.\n"
44
+ + "* `terminate`: Terminate the current task and report its completion status.\n"
45
+ + "* `answer`: Answer a question."
46
+ ),
47
+ "enum": [
48
+ "key",
49
+ "type",
50
+ "mouse_move",
51
+ "left_click",
52
+ "left_click_drag",
53
+ "right_click",
54
+ "middle_click",
55
+ "double_click",
56
+ "scroll",
57
+ "wait",
58
+ "terminate",
59
+ ],
60
+ "type": "string",
61
+ },
62
+ "keys": {
63
+ "description": "Required only by `action=key`.",
64
+ "type": "array",
65
+ },
66
+ "text": {
67
+ "description": "Required only by `action=type`.",
68
+ "type": "string",
69
+ },
70
+ "coordinate": {
71
+ "description": "The x,y coordinates for mouse actions.",
72
+ "type": "array",
73
+ },
74
+ "pixels": {"description": "The amount of scrolling.", "type": "number"},
75
+ "time": {"description": "The seconds to wait.", "type": "number"},
76
+ "status": {
77
+ "description": "The status of the task.",
78
+ "type": "string",
79
+ "enum": ["success", "failure"],
80
+ },
81
+ },
82
+ "required": ["action"],
83
+ "type": "object",
84
+ },
85
+ "args_format": "Format the arguments as a JSON object.",
86
+ },
87
+ }
88
+ DESKTOP_TRAJECTORY_SYSTEM_PROMPT_V0 = TRAJECTORY_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0))
89
+
90
+ # v0: grounding
91
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0_ACTIONS = copy.deepcopy(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0)
92
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0_ACTIONS["function"]["description"] = DESKTOP_GROUNDING_DESCRIPTION_ACTIONS
93
+
94
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V0_ACTIONS = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V0))
95
+
96
+ DESKTOP_GROUNDING_TOOLS_DEFINITION_V0_POINT = {
97
+ "type": "function",
98
+ "function": {
99
+ "name_for_human": "point",
100
+ "name": "point",
101
+ "description": DESKTOP_GROUNDING_DESCRIPTION_POINT,
102
+ "parameters": {
103
+ "properties": {
104
+ "coordinate": {
105
+ "description": "A 2-element list [x, y] representing the screen coordinates for the center of the described object.",
106
+ "type": "array",
107
+ },
108
+ },
109
+ "required": ["coordinate"],
110
+ "type": "object",
111
+ },
112
+ "args_format": "Format the arguments as a JSON object.",
113
+ },
114
+ }
115
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V0_POINT = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V0_POINT))
116
+
117
+ DESKTOP_GROUNDING_TOOLS_DEFINITION_V0_BBOX = {
118
+ "type": "function",
119
+ "function": {
120
+ "name_for_human": "bbox",
121
+ "name": "bbox",
122
+ "description": DESKTOP_GROUNDING_DESCRIPTION_BBOX,
123
+ "parameters": {
124
+ "properties": {
125
+ "coordinate": {
126
+ "description": "A 4-element list [x_min, y_min, x_max, y_max] defining the bounding box, where (x_min, y_min) is the top-left corner and (x_max, y_max) is the bottom-right corner.",
127
+ "type": "array",
128
+ },
129
+ },
130
+ "required": ["coordinate"],
131
+ "type": "object",
132
+ },
133
+ "args_format": "Format the arguments as a JSON object.",
134
+ },
135
+ }
136
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V0_BBOX = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V0_BBOX))
137
+
138
+
139
+ DESKTOP_GROUNDING_TOOLS_DEFINITION_V0 = {
140
+ "type": "function",
141
+ "function": {
142
+ "name_for_human": "desktop_grounding",
143
+ "name": "desktop_grounding",
144
+ "description": DESKTOP_GROUNDING_DESCRIPTION,
145
+ "parameters": {
146
+ "properties": {
147
+ "action": {
148
+ "description": (
149
+ "* `point`: Return the [x, y] center coordinates of the object described.\n"
150
+ + "* `bbox`: Return the [x_min, y_min, x_max, y_max] bounding box of the object described.\n"
151
+ + "* `key`: Performs key down presses on the arguments passed in order, then performs key releases in reverse order.\n"
152
+ + "* `type`: Type a string of text on the keyboard.\n"
153
+ + "* `mouse_move`: Move the cursor to a specified (x, y) pixel coordinate on the screen.\n"
154
+ + "* `left_click`: Click the left mouse button at a specified (x, y) pixel coordinate on the screen.\n"
155
+ + "* `left_click_drag`: Click and drag the cursor to a specified (x, y) pixel coordinate on the screen.\n"
156
+ + "* `right_click`: Click the right mouse button at a specified (x, y) pixel coordinate on the screen.\n"
157
+ + "* `middle_click`: Click the middle mouse button at a specified (x, y) pixel coordinate on the screen.\n"
158
+ + "* `double_click`: Double-click the left mouse button at a specified (x, y) pixel coordinate on the screen.\n"
159
+ + "* `triple_click`: Triple-click the left mouse button at a specified (x, y) pixel coordinate on the screen (simulated as double-click since it's the closest action).\n"
160
+ + "* `scroll`: Performs a scroll of the mouse scroll wheel.\n"
161
+ + "* `hscroll`: Performs a horizontal scroll (mapped to regular scroll).\n"
162
+ + "* `wait`: Wait specified seconds for the change to happen.\n"
163
+ + "* `terminate`: Terminate the current task and report its completion status.\n"
164
+ + "* `answer`: Answer a question."
165
+ ),
166
+ "enum": [
167
+ "point",
168
+ "bbox",
169
+ "key",
170
+ "type",
171
+ "mouse_move",
172
+ "left_click",
173
+ "left_click_drag",
174
+ "right_click",
175
+ "middle_click",
176
+ "double_click",
177
+ "scroll",
178
+ "wait",
179
+ "terminate",
180
+ ],
181
+ "type": "string",
182
+ },
183
+ "keys": {
184
+ "description": "Required only by `action=key`.",
185
+ "type": "array",
186
+ },
187
+ "text": {
188
+ "description": "Required only by `action=type`.",
189
+ "type": "string",
190
+ },
191
+ "coordinate": {
192
+ "description": "The [x, y] coordinates for mouse actions/point, or [x_min, y_min, x_max, y_max] for bbox.",
193
+ "type": "array",
194
+ },
195
+ "pixels": {"description": "The amount of scrolling.", "type": "number"},
196
+ "time": {"description": "The seconds to wait.", "type": "number"},
197
+ "status": {
198
+ "description": "The status of the task.",
199
+ "type": "string",
200
+ "enum": ["success", "failure"],
201
+ },
202
+ },
203
+ "required": ["action"],
204
+ "type": "object",
205
+ },
206
+ "args_format": "Format the arguments as a JSON object.",
207
+ },
208
+ }
209
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V0 = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V0))
cua_lite/data/interfaces/qwen3_vl/prompts/version_1/desktop_v1.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import json
3
+
4
+ # from .common import TRAJECTORY_SYSTEM_PROMPT_TEMPLATE, GROUNDING_SYSTEM_PROMPT_TEMPLATE
5
+ from cua_lite.data.interfaces.qwen3_vl.prompts.common import TRAJECTORY_SYSTEM_PROMPT_TEMPLATE, GROUNDING_SYSTEM_PROMPT_TEMPLATE
6
+
7
+ DESKTOP_TRAJECTORY_DESCRIPTION = """Use a mouse and keyboard to interact with a computer, and take screenshots.
8
+ * This is an interface to a desktop GUI. You do not have access to a terminal or applications menu. You must click on desktop icons to start applications.
9
+ * Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions. For example, if you click on Firefox and a window does not open, try waiting and then taking another screenshot.
10
+ * The screen resolution is 1000x1000.
11
+ * Whenever you intend to move the cursor to click on an element such as an icon, consult a screenshot first to determine the element's coordinates before moving the cursor.
12
+ * If you tried clicking on a program or link but it failed to load even after waiting, adjust your cursor position so that the tip of the cursor visually falls on the element you want to click.
13
+ * Make sure to click any buttons, links, icons, or other elements with the cursor tip in the center of the element. Do not click on edges unless explicitly instructed."""
14
+
15
+ # mention point and bbox
16
+ DESKTOP_GROUNDING_DESCRIPTION = """Given a computer screenshot, map the natural language instruction to an action or map the natural language reference to a point or a bounding box in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
17
+ DESKTOP_GROUNDING_DESCRIPTION_ACTIONS = """Given a computer screenshot, map the natural language action to a point in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
18
+ DESKTOP_GROUNDING_DESCRIPTION_POINT = """Given a computer screenshot, map the natural language reference to a point in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
19
+ DESKTOP_GROUNDING_DESCRIPTION_BBOX = """Given a computer screenshot, map the natural language reference to a bounding box in the screenshot. This is an interface to a desktop GUI. The screen resolution is 1000x1000."""
20
+
21
+ # v1
22
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1 = {
23
+ "type": "function",
24
+ "function": {
25
+ "name_for_human": "desktop_use",
26
+ "name": "desktop_use",
27
+ "description": DESKTOP_TRAJECTORY_DESCRIPTION,
28
+ "parameters": {
29
+ "properties": {
30
+ "action": {
31
+ "description": (
32
+ "* `key(keys=[\"key1\", \"key2\"])`: Press the keys in order (key down), then release them in reverse order (key up).\n"
33
+ + "* `type(text=\"string\")`: Type the given text.\n"
34
+ + "* `mouse_move(coordinate=[x, y])`: Move the cursor to (x, y).\n"
35
+ + "* `left_click(coordinate=[x, y][, keys=[\"key\"]])`: Left click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
36
+ + "* `left_click_drag(coordinate=[x, y][, keys=[\"key\"]])`: Drag to (x, y) with the left mouse button. If `keys` is provided, hold those keys while dragging.\n"
37
+ + "* `right_click(coordinate=[x, y][, keys=[\"key\"]])`: Right click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
38
+ + "* `middle_click(coordinate=[x, y][, keys=[\"key\"]])`: Middle click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
39
+ + "* `double_click(coordinate=[x, y][, keys=[\"key\"]])`: Double left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
40
+ + "* `triple_click(coordinate=[x, y][, keys=[\"key\"]])`: Triple left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
41
+ + "* `scroll(pixels=number[, keys=[\"key\"]])`: Scroll vertically. `pixels > 0` scrolls up, `pixels < 0` scrolls down. If `keys` is provided, hold those keys while scrolling.\n"
42
+ + "* `hscroll(pixels=number[, keys=[\"key\"]])`: Scroll horizontally. `pixels > 0` scrolls right, `pixels < 0` scrolls left. If `keys` is provided, hold those keys while scrolling.\n"
43
+ + "* `wait(time=seconds)`: Wait for the given number of seconds.\n"
44
+ + "* `terminate(status=\"success\"|\"failure\")`: End the task and report the status.\n"
45
+ ),
46
+ "enum": [
47
+ "key",
48
+ "type",
49
+ "mouse_move",
50
+ "left_click",
51
+ "left_click_drag",
52
+ "right_click",
53
+ "middle_click",
54
+ "double_click",
55
+ "triple_click",
56
+ "scroll",
57
+ "hscroll",
58
+ "wait",
59
+ "terminate",
60
+ ],
61
+ "type": "string",
62
+ },
63
+ "keys": {
64
+ "description": (
65
+ "Keys to press/hold.\n"
66
+ "Required only for action: `key` (press in order, release in reverse).\n"
67
+ "Optional only for actions: `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`, `scroll`, `hscroll` (hold these keys while performing the action).\n"
68
+ ),
69
+ "type": "array",
70
+ },
71
+ "text": {
72
+ "description": "The text content to type or the answer to return. Required only for actions: `type`, `answer`.",
73
+ "type": "string",
74
+ },
75
+ "coordinate": {
76
+ "description": "The (x, y) pixel coordinate on the screen. Required only for actions: `mouse_move`, `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`.",
77
+ "type": "array",
78
+ },
79
+ "pixels": {
80
+ "description": "The amount of scrolling to perform. Required only for actions: `scroll`, `hscroll`.",
81
+ "type": "number"
82
+ },
83
+ "time": {
84
+ "description": "The duration to wait in seconds. Required only for action: `wait`.",
85
+ "type": "number"
86
+ },
87
+ "status": {
88
+ "description": "The completion status of the task. Required only for action: `terminate`.",
89
+ "type": "string",
90
+ "enum": ["success", "failure"],
91
+ },
92
+ },
93
+ "required": ["action"],
94
+ "type": "object",
95
+ },
96
+ "args_format": "Format the arguments as a JSON object.",
97
+ },
98
+ }
99
+ DESKTOP_TRAJECTORY_SYSTEM_PROMPT_V1 = TRAJECTORY_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1))
100
+
101
+ # v1: grounding
102
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1_ACTIONS = copy.deepcopy(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1)
103
+ DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1_ACTIONS["function"]["description"] = DESKTOP_GROUNDING_DESCRIPTION_ACTIONS
104
+
105
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V1_ACTIONS = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_TRAJECTORY_TOOLS_DEFINITION_V1))
106
+
107
+ DESKTOP_GROUNDING_TOOLS_DEFINITION_V1_POINT = {
108
+ "type": "function",
109
+ "function": {
110
+ "name_for_human": "point",
111
+ "name": "point",
112
+ "description": DESKTOP_GROUNDING_DESCRIPTION_POINT,
113
+ "parameters": {
114
+ "properties": {
115
+ "coordinate": {
116
+ "description": "A 2-element list [x, y] representing the screen coordinates for the center of the described object.",
117
+ "type": "array",
118
+ },
119
+ },
120
+ "required": ["coordinate"],
121
+ "type": "object",
122
+ },
123
+ "args_format": "Format the arguments as a JSON object.",
124
+ },
125
+ }
126
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V1_POINT = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V1_POINT))
127
+
128
+ DESKTOP_GROUNDING_TOOLS_DEFINITION_V1_BBOX = {
129
+ "type": "function",
130
+ "function": {
131
+ "name_for_human": "bbox",
132
+ "name": "bbox",
133
+ "description": DESKTOP_GROUNDING_DESCRIPTION_BBOX,
134
+ "parameters": {
135
+ "properties": {
136
+ "coordinate": {
137
+ "description": "A 4-element list [x_min, y_min, x_max, y_max] defining the bounding box, where (x_min, y_min) is the top-left corner and (x_max, y_max) is the bottom-right corner.",
138
+ "type": "array",
139
+ },
140
+ },
141
+ "required": ["coordinate"],
142
+ "type": "object",
143
+ },
144
+ "args_format": "Format the arguments as a JSON object.",
145
+ },
146
+ }
147
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V1_BBOX = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V1_BBOX))
148
+
149
+
150
+ DESKTOP_GROUNDING_TOOLS_DEFINITION_V1 = {
151
+ "type": "function",
152
+ "function": {
153
+ "name_for_human": "desktop_grounding",
154
+ "name": "desktop_grounding",
155
+ "description": DESKTOP_GROUNDING_DESCRIPTION,
156
+ "parameters": {
157
+ "properties": {
158
+ "action": {
159
+ "description": (
160
+ "* `point`: Return the [x, y] center coordinates of the object described.\n"
161
+ + "* `bbox`: Return the [x_min, y_min, x_max, y_max] bounding box coordinates of the object described.\n"
162
+ + "* `key(keys=[\"key1\", \"key2\"])`: Press the keys in order (key down), then release them in reverse order (key up).\n"
163
+ + "* `type(text=\"string\")`: Type the given text.\n"
164
+ + "* `mouse_move(coordinate=[x, y])`: Move the cursor to (x, y).\n"
165
+ + "* `left_click(coordinate=[x, y][, keys=[\"key\"]])`: Left click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
166
+ + "* `left_click_drag(coordinate=[x, y][, keys=[\"key\"]])`: Drag to (x, y) with the left mouse button. If `keys` is provided, hold those keys while dragging.\n"
167
+ + "* `right_click(coordinate=[x, y][, keys=[\"key\"]])`: Right click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
168
+ + "* `middle_click(coordinate=[x, y][, keys=[\"key\"]])`: Middle click at (x, y). If `keys` is provided, hold those keys while clicking.\n"
169
+ + "* `double_click(coordinate=[x, y][, keys=[\"key\"]])`: Double left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
170
+ + "* `triple_click(coordinate=[x, y][, keys=[\"key\"]])`: Triple left click at (x, y). If `keys` is provided, hold those keys while double-clicking.\n"
171
+ + "* `scroll(pixels=number[, keys=[\"key\"]])`: Scroll vertically. `pixels > 0` scrolls up, `pixels < 0` scrolls down. If `keys` is provided, hold those keys while scrolling.\n"
172
+ + "* `hscroll(pixels=number[, keys=[\"key\"]])`: Scroll horizontally. `pixels > 0` scrolls right, `pixels < 0` scrolls left. If `keys` is provided, hold those keys while scrolling.\n"
173
+ + "* `wait(time=seconds)`: Wait for the given number of seconds.\n"
174
+ + "* `terminate(status=\"success\"|\"failure\")`: End the task and report the status.\n"
175
+ + "* `answer(text=\"answer\")`: Return a final answer in text."
176
+ ),
177
+ "enum": [
178
+ "point",
179
+ "bbox",
180
+ "key",
181
+ "type",
182
+ "mouse_move",
183
+ "left_click",
184
+ "left_click_drag",
185
+ "right_click",
186
+ "middle_click",
187
+ "double_click",
188
+ "triple_click",
189
+ "scroll",
190
+ "hscroll",
191
+ "wait",
192
+ "terminate",
193
+ ],
194
+ "type": "string",
195
+ },
196
+ "keys": {
197
+ "description": (
198
+ "Keys to press/hold.\n"
199
+ "Required only for action: `key` (press in order, release in reverse).\n"
200
+ "Optional only for actions: `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`, `scroll`, `hscroll` (hold these keys while performing the action).\n"
201
+ ),
202
+ "type": "array",
203
+ },
204
+ "text": {
205
+ "description": "The text content to type or the answer to return. Required only for actions: `type`, `answer`.",
206
+ "type": "string",
207
+ },
208
+ "coordinate": {
209
+ "description": "The [x, y] coordinates for mouse actions/point, or [x_min, y_min, x_max, y_max] for bbox. Required only for actions: `point`, `bbox`, `mouse_move`, `left_click`, `left_click_drag`, `right_click`, `middle_click`, `double_click`, `triple_click`.",
210
+ "type": "array",
211
+ },
212
+ "pixels": {
213
+ "description": "The amount of scrolling to perform. Required only for actions: `scroll`, `hscroll`.",
214
+ "type": "number"
215
+ },
216
+ "time": {
217
+ "description": "The duration to wait in seconds. Required only for action: `wait`.",
218
+ "type": "number"
219
+ },
220
+ "status": {
221
+ "description": "The completion status of the task. Required only for action: `terminate`.",
222
+ "type": "string",
223
+ "enum": ["success", "failure"],
224
+ },
225
+ },
226
+ "required": ["action"],
227
+ "type": "object",
228
+ },
229
+ "args_format": "Format the arguments as a JSON object.",
230
+ },
231
+ }
232
+ DESKTOP_GROUNDING_SYSTEM_PROMPT_V1 = GROUNDING_SYSTEM_PROMPT_TEMPLATE.format(tools_definition=json.dumps(DESKTOP_GROUNDING_TOOLS_DEFINITION_V1))
233
+
234
+
235
+
236
+ if __name__ == "__main__":
237
+ pass
238
+
cua_lite/data/preproc/opencua/opencua.py CHANGED
@@ -65,16 +65,16 @@ class ScriptArguments:
65
  # -----------------------------
66
 
67
 
68
- def _make_computer_use_tool_call(arguments: Dict[str, Any]) -> Dict[str, Any]:
69
- """Wrap a computer_use tool call in Qwen3-VL tool_call structure."""
70
  if "action" not in arguments:
71
  raise ValueError(
72
- f"computer_use arguments must include 'action'. Got: {arguments}"
73
  )
74
  return {
75
  "type": "function",
76
  "function": {
77
- "name": "computer_use",
78
  "arguments": arguments,
79
  },
80
  }
@@ -153,7 +153,7 @@ def _norm01_to_0_1000(x: float, y: float) -> List[int]:
153
 
154
 
155
  def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
156
- """Convert AgentNet pyautogui/computer code string into Qwen3-VL computer_use tool calls."""
157
  if not isinstance(code, str) or not code.strip():
158
  raise AgentNetCodeParseError(f"Expected non-empty code string. Got: {code!r}")
159
 
@@ -177,7 +177,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
177
  if fname == "pyautogui.click":
178
  x, y = _extract_xy(call)
179
  tool_calls.append(
180
- _make_computer_use_tool_call(
181
  {"action": "left_click", "coordinate": _norm01_to_0_1000(x, y)}
182
  )
183
  )
@@ -186,7 +186,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
186
  if fname == "pyautogui.rightClick":
187
  x, y = _extract_xy(call)
188
  tool_calls.append(
189
- _make_computer_use_tool_call(
190
  {"action": "right_click", "coordinate": _norm01_to_0_1000(x, y)}
191
  )
192
  )
@@ -195,7 +195,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
195
  if fname == "pyautogui.middleClick":
196
  x, y = _extract_xy(call)
197
  tool_calls.append(
198
- _make_computer_use_tool_call(
199
  {"action": "middle_click", "coordinate": _norm01_to_0_1000(x, y)}
200
  )
201
  )
@@ -204,7 +204,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
204
  if fname == "pyautogui.doubleClick":
205
  x, y = _extract_xy(call)
206
  tool_calls.append(
207
- _make_computer_use_tool_call(
208
  {"action": "double_click", "coordinate": _norm01_to_0_1000(x, y)}
209
  )
210
  )
@@ -213,7 +213,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
213
  if fname in {"pyautogui.tripleClick", "computer.tripleClick"}:
214
  x, y = _extract_xy(call)
215
  tool_calls.append(
216
- _make_computer_use_tool_call(
217
  {"action": "triple_click", "coordinate": _norm01_to_0_1000(x, y)}
218
  )
219
  )
@@ -223,7 +223,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
223
  if fname == "pyautogui.moveTo":
224
  x, y = _extract_xy(call)
225
  tool_calls.append(
226
- _make_computer_use_tool_call(
227
  {"action": "mouse_move", "coordinate": _norm01_to_0_1000(x, y)}
228
  )
229
  )
@@ -238,7 +238,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
238
  )
239
  x, y = _extract_xy(call)
240
  tool_calls.append(
241
- _make_computer_use_tool_call(
242
  {"action": "left_click_drag", "coordinate": _norm01_to_0_1000(x, y)}
243
  )
244
  )
@@ -253,11 +253,11 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
253
  pixels = int(_literal_eval(call.args[0]))
254
  if fname == "pyautogui.scroll":
255
  tool_calls.append(
256
- _make_computer_use_tool_call({"action": "scroll", "pixels": pixels})
257
  )
258
  else:
259
  tool_calls.append(
260
- _make_computer_use_tool_call({"action": "hscroll", "pixels": pixels})
261
  )
262
  continue
263
 
@@ -274,7 +274,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
274
  )
275
  keys = [str(k).lower() for k in keys_val]
276
  tool_calls.append(
277
- _make_computer_use_tool_call({"action": "key", "keys": keys})
278
  )
279
  continue
280
 
@@ -287,13 +287,13 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
287
  if isinstance(key_val, (list, tuple)):
288
  for k in key_val:
289
  tool_calls.append(
290
- _make_computer_use_tool_call(
291
  {"action": "key", "keys": [str(k).lower()]}
292
  )
293
  )
294
  else:
295
  tool_calls.append(
296
- _make_computer_use_tool_call(
297
  {"action": "key", "keys": [str(key_val).lower()]}
298
  )
299
  )
@@ -309,7 +309,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
309
  )
310
  text = str(_literal_eval(msg_node))
311
  tool_calls.append(
312
- _make_computer_use_tool_call({"action": "type", "text": text})
313
  )
314
  continue
315
 
@@ -324,7 +324,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
324
  f"Unsupported wait signature.\ncode=\n{code}"
325
  )
326
  tool_calls.append(
327
- _make_computer_use_tool_call({"action": "wait", "time": t})
328
  )
329
  continue
330
 
@@ -340,7 +340,7 @@ def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
340
  f"Unsupported terminate status={status!r}.\ncode=\n{code}"
341
  )
342
  tool_calls.append(
343
- _make_computer_use_tool_call({"action": "terminate", "status": status})
344
  )
345
  continue
346
 
@@ -713,7 +713,7 @@ messages = [
713
  "content": [{"type": "text", "text": "{traj[0].value.action}"}],
714
  # Convert raw `{traj[0].value.code}` (pyauto) -> Qwen3-VL action space
715
  "tool_calls": [
716
- {"type": "function", "function": {"name": "computer_use", "arguments": "..."}}
717
  ],
718
  },
719
 
@@ -732,7 +732,7 @@ messages = [
732
  "content": [{"type": "text", "text": "{traj[1].value.action}"}],
733
  # Convert raw `{traj[1].value.code}` (pyauto) -> Qwen3-VL action space
734
  "tool_calls": [
735
- {"type": "function", "function": {"name": "computer_use", "arguments": "..."}}
736
  ],
737
  },
738
 
@@ -750,7 +750,7 @@ messages = [
750
  "content": [{"type": "text", "text": "{traj[n].value.action}"}],
751
  # Convert raw `{traj[n].value.code}` (pyauto) -> Qwen3-VL action space
752
  "tool_calls": [
753
- {"type": "function", "function": {"name": "computer_use", "arguments": "..."}}
754
  ],
755
  },
756
  ]
@@ -840,7 +840,7 @@ tools_def = {
840
 
841
  # AgentNet → Qwen3-VL Action Mapping
842
  This file documents how to convert AgentNet GUI actions (`traj[n].value.code`)
843
- into Qwen3-VL `computer_use` tool calls.
844
 
845
  Coordinates rule:
846
  - AgentNet uses normalized floats `[0, 1]`
@@ -858,7 +858,7 @@ pyautogui.click(x=0.018, y=0.508)
858
  "tool_calls": [{
859
  "type": "function",
860
  "function": {
861
- "name": "computer_use",
862
  "arguments": {
863
  "action": "left_click",
864
  "coordinate": [18, 508]
@@ -877,7 +877,7 @@ pyautogui.rightClick(x=0.304, y=0.684)
877
  "tool_calls": [{
878
  "type": "function",
879
  "function": {
880
- "name": "computer_use",
881
  "arguments": {
882
  "action": "right_click",
883
  "coordinate": [304, 684]
@@ -896,7 +896,7 @@ pyautogui.doubleClick(x=0.153, y=0.283)
896
  "tool_calls": [{
897
  "type": "function",
898
  "function": {
899
- "name": "computer_use",
900
  "arguments": {
901
  "action": "double_click",
902
  "coordinate": [153, 283]
@@ -915,7 +915,7 @@ computer.tripleClick(x=0.226, y=0.311)
915
  "tool_calls": [{
916
  "type": "function",
917
  "function": {
918
- "name": "computer_use",
919
  "arguments": {
920
  "action": "triple_click",
921
  "coordinate": [226, 311]
@@ -934,7 +934,7 @@ pyautogui.moveTo(x=0.394, y=0.604)
934
  "tool_calls": [{
935
  "type": "function",
936
  "function": {
937
- "name": "computer_use",
938
  "arguments": {
939
  "action": "mouse_move",
940
  "coordinate": [394, 604]
@@ -955,7 +955,7 @@ pyautogui.dragTo(x=0.085, y=0.361, button="left")
955
  {
956
  "type": "function",
957
  "function": {
958
- "name": "computer_use",
959
  "arguments": {
960
  "action": "mouse_move",
961
  "coordinate": [78, 381]
@@ -965,7 +965,7 @@ pyautogui.dragTo(x=0.085, y=0.361, button="left")
965
  {
966
  "type": "function",
967
  "function": {
968
- "name": "computer_use",
969
  "arguments": {
970
  "action": "left_click_drag",
971
  "coordinate": [85, 361]
@@ -985,7 +985,7 @@ pyautogui.scroll(-8)
985
  "tool_calls": [{
986
  "type": "function",
987
  "function": {
988
- "name": "computer_use",
989
  "arguments": {
990
  "action": "scroll",
991
  "pixels": -8
@@ -1004,7 +1004,7 @@ pyautogui.hotkey(["ctrl", "o"])
1004
  "tool_calls": [{
1005
  "type": "function",
1006
  "function": {
1007
- "name": "computer_use",
1008
  "arguments": {
1009
  "action": "key",
1010
  "keys": ["ctrl", "o"]
@@ -1023,7 +1023,7 @@ pyautogui.press("enter")
1023
  "tool_calls": [{
1024
  "type": "function",
1025
  "function": {
1026
- "name": "computer_use",
1027
  "arguments": {
1028
  "action": "key",
1029
  "keys": ["enter"]
@@ -1042,7 +1042,7 @@ pyautogui.write(message="cd ./Pictures")
1042
  "tool_calls": [{
1043
  "type": "function",
1044
  "function": {
1045
- "name": "computer_use",
1046
  "arguments": {
1047
  "action": "type",
1048
  "text": "cd ./Pictures"
@@ -1061,7 +1061,7 @@ computer.wait()
1061
  "tool_calls": [{
1062
  "type": "function",
1063
  "function": {
1064
- "name": "computer_use",
1065
  "arguments": {
1066
  "action": "wait",
1067
  "time": 1.0
@@ -1080,7 +1080,7 @@ computer.terminate(status="success")
1080
  "tool_calls": [{
1081
  "type": "function",
1082
  "function": {
1083
- "name": "computer_use",
1084
  "arguments": {
1085
  "action": "terminate",
1086
  "status": "success"
 
65
  # -----------------------------
66
 
67
 
68
+ def _make_desktop_use_tool_call(arguments: Dict[str, Any]) -> Dict[str, Any]:
69
+ """Wrap a desktop_use tool call in Qwen3-VL tool_call structure."""
70
  if "action" not in arguments:
71
  raise ValueError(
72
+ f"desktop_use arguments must include 'action'. Got: {arguments}"
73
  )
74
  return {
75
  "type": "function",
76
  "function": {
77
+ "name": "desktop_use",
78
  "arguments": arguments,
79
  },
80
  }
 
153
 
154
 
155
  def agentnet_code_to_qwen_tool_calls(code: str) -> List[Dict[str, Any]]:
156
+ """Convert AgentNet pyautogui/computer code string into Qwen3-VL desktop_use tool calls."""
157
  if not isinstance(code, str) or not code.strip():
158
  raise AgentNetCodeParseError(f"Expected non-empty code string. Got: {code!r}")
159
 
 
177
  if fname == "pyautogui.click":
178
  x, y = _extract_xy(call)
179
  tool_calls.append(
180
+ _make_desktop_use_tool_call(
181
  {"action": "left_click", "coordinate": _norm01_to_0_1000(x, y)}
182
  )
183
  )
 
186
  if fname == "pyautogui.rightClick":
187
  x, y = _extract_xy(call)
188
  tool_calls.append(
189
+ _make_desktop_use_tool_call(
190
  {"action": "right_click", "coordinate": _norm01_to_0_1000(x, y)}
191
  )
192
  )
 
195
  if fname == "pyautogui.middleClick":
196
  x, y = _extract_xy(call)
197
  tool_calls.append(
198
+ _make_desktop_use_tool_call(
199
  {"action": "middle_click", "coordinate": _norm01_to_0_1000(x, y)}
200
  )
201
  )
 
204
  if fname == "pyautogui.doubleClick":
205
  x, y = _extract_xy(call)
206
  tool_calls.append(
207
+ _make_desktop_use_tool_call(
208
  {"action": "double_click", "coordinate": _norm01_to_0_1000(x, y)}
209
  )
210
  )
 
213
  if fname in {"pyautogui.tripleClick", "computer.tripleClick"}:
214
  x, y = _extract_xy(call)
215
  tool_calls.append(
216
+ _make_desktop_use_tool_call(
217
  {"action": "triple_click", "coordinate": _norm01_to_0_1000(x, y)}
218
  )
219
  )
 
223
  if fname == "pyautogui.moveTo":
224
  x, y = _extract_xy(call)
225
  tool_calls.append(
226
+ _make_desktop_use_tool_call(
227
  {"action": "mouse_move", "coordinate": _norm01_to_0_1000(x, y)}
228
  )
229
  )
 
238
  )
239
  x, y = _extract_xy(call)
240
  tool_calls.append(
241
+ _make_desktop_use_tool_call(
242
  {"action": "left_click_drag", "coordinate": _norm01_to_0_1000(x, y)}
243
  )
244
  )
 
253
  pixels = int(_literal_eval(call.args[0]))
254
  if fname == "pyautogui.scroll":
255
  tool_calls.append(
256
+ _make_desktop_use_tool_call({"action": "scroll", "pixels": pixels})
257
  )
258
  else:
259
  tool_calls.append(
260
+ _make_desktop_use_tool_call({"action": "hscroll", "pixels": pixels})
261
  )
262
  continue
263
 
 
274
  )
275
  keys = [str(k).lower() for k in keys_val]
276
  tool_calls.append(
277
+ _make_desktop_use_tool_call({"action": "key", "keys": keys})
278
  )
279
  continue
280
 
 
287
  if isinstance(key_val, (list, tuple)):
288
  for k in key_val:
289
  tool_calls.append(
290
+ _make_desktop_use_tool_call(
291
  {"action": "key", "keys": [str(k).lower()]}
292
  )
293
  )
294
  else:
295
  tool_calls.append(
296
+ _make_desktop_use_tool_call(
297
  {"action": "key", "keys": [str(key_val).lower()]}
298
  )
299
  )
 
309
  )
310
  text = str(_literal_eval(msg_node))
311
  tool_calls.append(
312
+ _make_desktop_use_tool_call({"action": "type", "text": text})
313
  )
314
  continue
315
 
 
324
  f"Unsupported wait signature.\ncode=\n{code}"
325
  )
326
  tool_calls.append(
327
+ _make_desktop_use_tool_call({"action": "wait", "time": t})
328
  )
329
  continue
330
 
 
340
  f"Unsupported terminate status={status!r}.\ncode=\n{code}"
341
  )
342
  tool_calls.append(
343
+ _make_desktop_use_tool_call({"action": "terminate", "status": status})
344
  )
345
  continue
346
 
 
713
  "content": [{"type": "text", "text": "{traj[0].value.action}"}],
714
  # Convert raw `{traj[0].value.code}` (pyauto) -> Qwen3-VL action space
715
  "tool_calls": [
716
+ {"type": "function", "function": {"name": "desktop_use", "arguments": "..."}}
717
  ],
718
  },
719
 
 
732
  "content": [{"type": "text", "text": "{traj[1].value.action}"}],
733
  # Convert raw `{traj[1].value.code}` (pyauto) -> Qwen3-VL action space
734
  "tool_calls": [
735
+ {"type": "function", "function": {"name": "desktop_use", "arguments": "..."}}
736
  ],
737
  },
738
 
 
750
  "content": [{"type": "text", "text": "{traj[n].value.action}"}],
751
  # Convert raw `{traj[n].value.code}` (pyauto) -> Qwen3-VL action space
752
  "tool_calls": [
753
+ {"type": "function", "function": {"name": "desktop_use", "arguments": "..."}}
754
  ],
755
  },
756
  ]
 
840
 
841
  # AgentNet → Qwen3-VL Action Mapping
842
  This file documents how to convert AgentNet GUI actions (`traj[n].value.code`)
843
+ into Qwen3-VL `desktop_use` tool calls.
844
 
845
  Coordinates rule:
846
  - AgentNet uses normalized floats `[0, 1]`
 
858
  "tool_calls": [{
859
  "type": "function",
860
  "function": {
861
+ "name": "desktop_use",
862
  "arguments": {
863
  "action": "left_click",
864
  "coordinate": [18, 508]
 
877
  "tool_calls": [{
878
  "type": "function",
879
  "function": {
880
+ "name": "desktop_use",
881
  "arguments": {
882
  "action": "right_click",
883
  "coordinate": [304, 684]
 
896
  "tool_calls": [{
897
  "type": "function",
898
  "function": {
899
+ "name": "desktop_use",
900
  "arguments": {
901
  "action": "double_click",
902
  "coordinate": [153, 283]
 
915
  "tool_calls": [{
916
  "type": "function",
917
  "function": {
918
+ "name": "desktop_use",
919
  "arguments": {
920
  "action": "triple_click",
921
  "coordinate": [226, 311]
 
934
  "tool_calls": [{
935
  "type": "function",
936
  "function": {
937
+ "name": "desktop_use",
938
  "arguments": {
939
  "action": "mouse_move",
940
  "coordinate": [394, 604]
 
955
  {
956
  "type": "function",
957
  "function": {
958
+ "name": "desktop_use",
959
  "arguments": {
960
  "action": "mouse_move",
961
  "coordinate": [78, 381]
 
965
  {
966
  "type": "function",
967
  "function": {
968
+ "name": "desktop_use",
969
  "arguments": {
970
  "action": "left_click_drag",
971
  "coordinate": [85, 361]
 
985
  "tool_calls": [{
986
  "type": "function",
987
  "function": {
988
+ "name": "desktop_use",
989
  "arguments": {
990
  "action": "scroll",
991
  "pixels": -8
 
1004
  "tool_calls": [{
1005
  "type": "function",
1006
  "function": {
1007
+ "name": "desktop_use",
1008
  "arguments": {
1009
  "action": "key",
1010
  "keys": ["ctrl", "o"]
 
1023
  "tool_calls": [{
1024
  "type": "function",
1025
  "function": {
1026
+ "name": "desktop_use",
1027
  "arguments": {
1028
  "action": "key",
1029
  "keys": ["enter"]
 
1042
  "tool_calls": [{
1043
  "type": "function",
1044
  "function": {
1045
+ "name": "desktop_use",
1046
  "arguments": {
1047
  "action": "type",
1048
  "text": "cd ./Pictures"
 
1061
  "tool_calls": [{
1062
  "type": "function",
1063
  "function": {
1064
+ "name": "desktop_use",
1065
  "arguments": {
1066
  "action": "wait",
1067
  "time": 1.0
 
1080
  "tool_calls": [{
1081
  "type": "function",
1082
  "function": {
1083
+ "name": "desktop_use",
1084
  "arguments": {
1085
  "action": "terminate",
1086
  "status": "success"
cua_lite/data/utils.py CHANGED
@@ -1,5 +1,5 @@
1
  from collections import defaultdict
2
- from typing import Callable, Any
3
 
4
 
5
  def clean_nones(item):
@@ -14,15 +14,8 @@ def clean_nones(item):
14
  return item
15
 
16
 
17
- # # 2. 定义 Transform 函数(适配 Dataset 的 batch 格式)
18
- # def transform_batch(batch):
19
- # # batch 是一个字典,例如 {'messages': [ [...], [...] ]}
20
- # # 我们需要对里面的每一个样本进行清洗
21
- # return {k: [clean_nones(item) for item in v] for k, v in batch.items()}
22
-
23
-
24
  def batch_proc(
25
- func: Callable[[dict[str, Any]], dict[str, Any]], batch: dict[str, list[Any]]
26
  ) -> dict[str, list[Any]]:
27
  """
28
  Core reusable logic:
@@ -47,10 +40,58 @@ def batch_proc(
47
 
48
  # 3. Call the processing function
49
  # Expected to return a dict, e.g., {'messages': ..., 'status': ..., 'meta': ...}
50
- processed_row = func(row)
51
 
52
  # 4. Aggregate ALL keys from the result
53
  for key, value in processed_row.items():
54
  output_batch[key].append(value)
55
 
56
  return dict(output_batch)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from collections import defaultdict
2
+ from typing import Any, Callable, Mapping, Iterable
3
 
4
 
5
  def clean_nones(item):
 
14
  return item
15
 
16
 
 
 
 
 
 
 
 
17
  def batch_proc(
18
+ func: Callable[[dict[str, Any]], dict[str, Any]], batch: dict[str, list[Any]], **kwargs
19
  ) -> dict[str, list[Any]]:
20
  """
21
  Core reusable logic:
 
40
 
41
  # 3. Call the processing function
42
  # Expected to return a dict, e.g., {'messages': ..., 'status': ..., 'meta': ...}
43
+ processed_row = func(row, **kwargs)
44
 
45
  # 4. Aggregate ALL keys from the result
46
  for key, value in processed_row.items():
47
  output_batch[key].append(value)
48
 
49
  return dict(output_batch)
50
+
51
+
52
+ # def batch_proc(
53
+ # func: Callable[[dict[str, Any]], Mapping[str, Any]],
54
+ # batch: dict[str, list[Any]],
55
+ # *,
56
+ # flatten_list_values: bool = False,
57
+ # validate_batch: bool = True,
58
+ # **kwargs,
59
+ # ) -> dict[str, list[Any]]:
60
+ # """
61
+ # Columnar batch -> row-wise func -> columnar aggregation.
62
+
63
+ # Assumes func(row, **kwargs) returns a dict[str, Any | list[Any]].
64
+ # """
65
+
66
+ # if not batch:
67
+ # return {}
68
+
69
+ # first_key = next(iter(batch))
70
+ # batch_size = len(batch[first_key])
71
+
72
+ # if validate_batch:
73
+ # for k, col in batch.items():
74
+ # if len(col) != batch_size:
75
+ # raise ValueError(
76
+ # f"Column length mismatch: '{first_key}' has {batch_size}, "
77
+ # f"but '{k}' has {len(col)}."
78
+ # )
79
+
80
+ # output_batch = defaultdict(list)
81
+
82
+ # for i in range(batch_size):
83
+ # row = {key: batch[key][i] for key in batch}
84
+
85
+ # processed_row = func(row, **kwargs)
86
+ # if not isinstance(processed_row, Mapping):
87
+ # raise TypeError(
88
+ # f"func must return Mapping[str, Any], got {type(processed_row).__name__}"
89
+ # )
90
+
91
+ # for key, value in processed_row.items():
92
+ # if flatten_list_values and isinstance(value, (list, tuple)):
93
+ # output_batch[key].extend(value)
94
+ # else:
95
+ # output_batch[key].append(value)
96
+
97
+ # return dict(output_batch)