================
@@ -224,99 +265,155 @@ def collect_output(self, category, timeout_secs, pattern, clear=True):
                 break
         return collected_output if collected_output else None
 
-    def _enqueue_recv_packet(self, packet: Optional[ProtocolMessage]):
-        self.recv_condition.acquire()
+    def _enqueue_recv_packet(self, packet: Union[ProtocolMessage, EOFError]):
         self.recv_packets.append(packet)
         self.recv_condition.notify()
-        self.recv_condition.release()
 
-    def _handle_recv_packet(self, packet: Optional[ProtocolMessage]) -> bool:
+    def _handle_recv_packet(self, packet: _InternalProtocolMessage) -> bool:
         """Called by the read thread that is waiting for all incoming packets
         to store the incoming packet in "self.recv_packets" in a thread safe
         way. This function will then signal the "self.recv_condition" to
         indicate a new packet is available. Returns True if the caller
         should keep calling this function for more packets.
         """
-        # If EOF, notify the read thread by enqueuing a None.
-        if not packet:
-            self._enqueue_recv_packet(None)
-            return False
-
-        # Check the packet to see if is an event packet
-        keepGoing = True
-        packet_type = packet["type"]
-        if packet_type == "event":
-            event = packet["event"]
-            body = None
-            if "body" in packet:
-                body = packet["body"]
-            # Handle the event packet and cache information from these packets
-            # as they come in
-            if event == "output":
-                # Store any output we receive so clients can retrieve it later.
-                category = body["category"]
-                output = body["output"]
-                self.output_condition.acquire()
-                if category in self.output:
-                    self.output[category] += output
-                else:
-                    self.output[category] = output
-                self.output_condition.notify()
-                self.output_condition.release()
-                # no need to add 'output' event packets to our packets list
-                return keepGoing
-            elif event == "initialized":
-                self.initialized = True
-            elif event == "process":
-                # When a new process is attached or launched, remember the
-                # details that are available in the body of the event
-                self.process_event_body = body
-            elif event == "exited":
-                # Process exited, mark the status to indicate the process is not
-                # alive.
-                self.exit_status = body["exitCode"]
-            elif event == "continued":
-                # When the process continues, clear the known threads and
-                # thread_stop_reasons.
-                all_threads_continued = body.get("allThreadsContinued", True)
-                tid = body["threadId"]
-                if tid in self.thread_stop_reasons:
-                    del self.thread_stop_reasons[tid]
-                self._process_continued(all_threads_continued)
-            elif event == "stopped":
-                # Each thread that stops with a reason will send a
-                # 'stopped' event. We need to remember the thread stop
-                # reasons since the 'threads' command doesn't return
-                # that information.
-                self._process_stopped()
-                tid = body["threadId"]
-                self.thread_stop_reasons[tid] = body
-            elif event.startswith("progress"):
-                # Progress events come in as 'progressStart', 'progressUpdate',
-                # and 'progressEnd' events. Keep these around in case test
-                # cases want to verify them.
-                self.progress_events.append(packet)
-
-        elif packet_type == "response":
-            if packet["command"] == "disconnect":
-                keepGoing = False
-        self._enqueue_recv_packet(packet)
-        return keepGoing
+        # Hold the recv_condition for consistency of debugger state.
+        with self.recv_condition:
+            if isinstance(packet, EOFError):
+                self._enqueue_recv_packet(packet)
+                return False
+
+            keep_going = True
+
+            # Check the packet to see if is an event packet
+            if packet["type"] == "event" and "event" in packet:
+                event = packet["event"]
+                body = packet.get("body")
+                # Handle the event packet and cache DAP stateful information from
+                # these packets as they come in.
+                if event == "output" and body is not None:
+                    # Store any output we receive so clients can retrieve it later.
+                    category = body["category"]
+                    output = body["output"]
+                    self.output_condition.acquire()
+                    if category in self.output:
+                        self.output[category] += output
+                    else:
+                        self.output[category] = output
+                    self.output_condition.notify()
+                    self.output_condition.release()
+                    # no need to add 'output' event packets to our packets list
+                    return keep_going
+                elif event == "initialized":
+                    self.initialized = True
+                elif event == "process" and body is not None:
+                    # When a new process is attached or launched, remember the
+                    # details that are available in the body of the event
+                    self.process_event_body = body
+                elif event == "terminated":
+                    # If we get the 'terminated' event then lldb-dap has exited
+                    # itself.
+                    self.terminated = True
+                elif event == "exited" and body is not None:
+                    # Process exited, mark the status to indicate the process is not
+                    # alive.
+                    self.exit_status = body.get("exitCode", 0)
+                elif event == "continued" and body is not None:
+                    # When the process continues, clear the known threads and
+                    # thread_stop_reasons.
+                    all_threads_continued = body.get("allThreadsContinued", True)
+                    tid = body["threadId"]
+                    if tid in self.thread_stop_reasons:
+                        del self.thread_stop_reasons[tid]
+                    self._process_continued(all_threads_continued)
+                elif event == "stopped" and body is not None:
+                    # Each thread that stops with a reason will send a
+                    # 'stopped' event. We need to remember the thread stop
+                    # reasons since the 'threads' command doesn't return
+                    # that information.
+                    self._process_stopped()
+                    tid = body["threadId"]
+                    self.thread_stop_reasons[tid] = body
+                elif event.startswith("progress"):
+                    # Progress events come in as 'progressStart', 'progressUpdate',
+                    # and 'progressEnd' events. Keep these around in case test
+                    # cases want to verify them.
+                    self.progress_events.append(packet)
+
+            elif packet["type"] == "response":
+                if packet["command"] == "disconnect":
+                    keep_going = False
+
+            elif packet["type"] == "request":
+                # Handle reverse requests and keep processing.
+                self._handle_reverse_request(packet)
+                return keep_going
+
+            self._enqueue_recv_packet(packet)
+            return keep_going
 
     def _process_continued(self, all_threads_continued: bool):
         self.threads = None
         self.frame_scopes = {}
         if all_threads_continued:
             self.thread_stop_reasons = {}
 
-    def send_packet(self, command_dict: Request, set_sequence=True):
+    def _handle_reverse_request(self, request: Request):
+        self.reverse_requests.append(request)
+        arguments = request.get("arguments")
+        if request["command"] == "runInTerminal" and arguments is not None:
+            in_shell = arguments.get("argsCanBeInterpretedByShell", False)
+            proc = subprocess.Popen(
+                arguments["args"],
+                env=arguments.get("env", {}),
+                cwd=arguments["cwd"],
+                stdin=subprocess.DEVNULL,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+                shell=in_shell,
+            )
+            body = {}
+            if in_shell:
+                body["shellProcessId"] = proc.pid
+            else:
+                body["processId"] = proc.pid
+            self.send_packet(
+                {
+                    "type": "response",
+                    "seq": 0,
+                    "request_seq": request["seq"],
+                    "success": True,
+                    "command": "runInTerminal",
+                    "message": None,
+                    "body": body,
+                }
+            )
+        elif request["command"] == "startDebugging":
+            self.send_packet(
+                {
+                    "type": "response",
+                    "seq": 0,
+                    "request_seq": request["seq"],
+                    "success": True,
+                    "message": None,
+                    "command": "startDebugging",
+                    "body": {},
+                }
+            )
+        else:
+            desc = 'unknown reverse request "%s"' % (request["command"])
+            raise ValueError(desc)
+
+    def send_packet(self, command_dict: ProtocolMessage) -> int:
         """Take the "command_dict" python dictionary and encode it as a JSON
         string and send the contents as a packet to the VSCode debug
-        adapter"""
+        adapter."""
+        seq = 0
         # Set the sequence ID for this command automatically
-        if set_sequence:
-            command_dict["seq"] = self.sequence
+        if command_dict["type"] == "request":
+            seq = command_dict["seq"] = self.sequence
             self.sequence += 1
+        else:
+            command_dict["seq"] = 0

----------------
ashgti wrote:
Updated with a slightly different version, LMKWYT

https://github.com/llvm/llvm-project/pull/141689
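
(Editor's note, not part of the patch or thread: the core pattern in this diff
is a condition-variable protected packet queue. The read thread appends packets,
or an EOFError sentinel, while holding "recv_condition" and then notifies;
consumers wait on the same condition. Below is a minimal, self-contained sketch
of that pattern. The names PacketQueue, enqueue, and wait_for are illustrative
assumptions and are not the names used in dap_server.py.)

    import threading
    from typing import Callable, List, Union

    class PacketQueue:
        """Sketch of the producer/consumer pattern the patch relies on: a
        read thread enqueues packets (or an EOFError sentinel) under a
        condition variable, and consumers block on the same condition."""

        def __init__(self) -> None:
            self.recv_condition = threading.Condition()
            self.recv_packets: List[Union[dict, EOFError]] = []

        def enqueue(self, packet: Union[dict, EOFError]) -> None:
            # Producer side: called from the read thread for every decoded
            # packet; EOFError is enqueued as-is so waiters always wake up.
            with self.recv_condition:
                self.recv_packets.append(packet)
                self.recv_condition.notify()

        def wait_for(self, predicate: Callable[[dict], bool]):
            # Consumer side: blocks until a matching packet arrives, or
            # until EOF is seen (so callers never hang on a dead adapter).
            with self.recv_condition:
                while True:
                    for i, packet in enumerate(self.recv_packets):
                        if isinstance(packet, EOFError) or predicate(packet):
                            return self.recv_packets.pop(i)
                    self.recv_condition.wait()

This is also why the new _handle_recv_packet wraps its whole body in "with
self.recv_condition:" and why _enqueue_recv_packet no longer acquires the lock
itself: the state bookkeeping (thread_stop_reasons, exit_status, etc.) and the
enqueue happen atomically with respect to any waiting consumers.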