[Lldb-commits] [PATCH] D94357: [NFC] Add some additional, unconditional, logging to debugserver mostly related to app launching/attaching

2021-01-11 Thread Jason Molenda via Phabricator via lldb-commits
This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rGedde2eb1d209: Add unconditional logging to debugserver for 
launch/attach processes (authored by jasonmolenda).

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D94357/new/

https://reviews.llvm.org/D94357

Files:
  lldb/tools/debugserver/source/MacOSX/MachProcess.mm
  lldb/tools/debugserver/source/MacOSX/MachTask.mm
  lldb/tools/debugserver/source/RNBRemote.cpp
  lldb/tools/debugserver/source/debugserver.cpp

Index: lldb/tools/debugserver/source/debugserver.cpp
===
--- lldb/tools/debugserver/source/debugserver.cpp
+++ lldb/tools/debugserver/source/debugserver.cpp
@@ -583,29 +583,34 @@
   }
 
   if (set_events & RNBContext::event_proc_thread_exiting) {
+DNBLog("debugserver's process monitoring thread has exited.");
 mode = eRNBRunLoopModeExit;
   }
 
   if (set_events & RNBContext::event_read_thread_exiting) {
 // Out remote packet receiving thread exited, exit for now.
+DNBLog(
+"debugserver's packet communication to lldb has been shut down.");
 if (ctx.HasValidProcessID()) {
+  nub_process_t pid = ctx.ProcessID();
   // TODO: We should add code that will leave the current process
   // in its current state and listen for another connection...
   if (ctx.ProcessStateRunning()) {
 if (ctx.GetDetachOnError()) {
-  DNBLog("debugserver's event read thread is exiting, detaching "
- "from the inferior process.");
-  DNBProcessDetach(ctx.ProcessID());
+  DNBLog("debugserver has a valid PID %d, it is still running. "
+ "detaching from the inferior process.",
+ pid);
+  DNBProcessDetach(pid);
 } else {
-  DNBLog("debugserver's event read thread is exiting, killing the "
- "inferior process.");
-  DNBProcessKill(ctx.ProcessID());
+  DNBLog("debugserver killing the inferior process, pid %d.", pid);
+  DNBProcessKill(pid);
 }
   } else {
 if (ctx.GetDetachOnError()) {
-  DNBLog("debugserver's event read thread is exiting, detaching "
- "from the inferior process.");
-  DNBProcessDetach(ctx.ProcessID());
+  DNBLog("debugserver has a valid PID %d but it may no longer "
+ "be running, detaching from the inferior process.",
+ pid);
+  DNBProcessDetach(pid);
 }
   }
 }
@@ -1104,21 +1109,30 @@
   if (optarg && optarg[0]) {
 if (strcasecmp(optarg, "auto") == 0)
   g_launch_flavor = eLaunchFlavorDefault;
-else if (strcasestr(optarg, "posix") == optarg)
+else if (strcasestr(optarg, "posix") == optarg) {
+  DNBLog(
+  "[LaunchAttach] launch flavor is posix_spawn via cmdline option");
   g_launch_flavor = eLaunchFlavorPosixSpawn;
-else if (strcasestr(optarg, "fork") == optarg)
+} else if (strcasestr(optarg, "fork") == optarg)
   g_launch_flavor = eLaunchFlavorForkExec;
 #ifdef WITH_SPRINGBOARD
-else if (strcasestr(optarg, "spring") == optarg)
+else if (strcasestr(optarg, "spring") == optarg) {
+  DNBLog(
+  "[LaunchAttach] launch flavor is SpringBoard via cmdline option");
   g_launch_flavor = eLaunchFlavorSpringBoard;
+}
 #endif
 #ifdef WITH_BKS
-else if (strcasestr(optarg, "backboard") == optarg)
+else if (strcasestr(optarg, "backboard") == optarg) {
+  DNBLog("[LaunchAttach] launch flavor is BKS via cmdline option");
   g_launch_flavor = eLaunchFlavorBKS;
+}
 #endif
 #ifdef WITH_FBS
-else if (strcasestr(optarg, "frontboard") == optarg)
+else if (strcasestr(optarg, "frontboard") == optarg) {
+  DNBLog("[LaunchAttach] launch flavor is FBS via cmdline option");
   g_launch_flavor = eLaunchFlavorFBS;
+}
 #endif
 
 else {
@@ -1398,6 +1412,7 @@
 dup2(null, STDOUT_FILENO);
 dup2(null, STDERR_FILENO);
   } else if (g_applist_opt != 0) {
+DNBLog("debugserver running in --applist mode");
 // List all applications we are able to see
 std::string applist_plist;
 int err = ListApplications(applist_plist, false, false);
@@ -1455,6 +1470,7 @@
 mode = eRNBRunLoopModeExit;
   } else if (g_applist_opt != 0) {
 // List all applications we are able to see
+DNBLog("debugserver running in applist mode under lockdown");
 std::string applist_plist;
 if (ListApplications(applist_plist, false, 

[Lldb-commits] [lldb] edde2eb - Add unconditional logging to debugserver for launch/attach processes

2021-01-11 Thread Jason Molenda via lldb-commits

Author: Jason Molenda
Date: 2021-01-11T22:17:10-08:00
New Revision: edde2eb1d2093905a2cb6166e6a60f9cc04c2bbc

URL: 
https://github.com/llvm/llvm-project/commit/edde2eb1d2093905a2cb6166e6a60f9cc04c2bbc
DIFF: 
https://github.com/llvm/llvm-project/commit/edde2eb1d2093905a2cb6166e6a60f9cc04c2bbc.diff

LOG: Add unconditional logging to debugserver for launch/attach processes

Debugging app launch/attach failures can be difficult because of
all of the messages logged to the console on a darwin system;
emitting specific messages around critical API calls can make it
easier to narrow the search for the console messages related to
the failure.



Differential revision: https://reviews.llvm.org/D94357

Added: 


Modified: 
lldb/tools/debugserver/source/MacOSX/MachProcess.mm
lldb/tools/debugserver/source/MacOSX/MachTask.mm
lldb/tools/debugserver/source/RNBRemote.cpp
lldb/tools/debugserver/source/debugserver.cpp

Removed: 




diff  --git a/lldb/tools/debugserver/source/MacOSX/MachProcess.mm 
b/lldb/tools/debugserver/source/MacOSX/MachProcess.mm
index 7176a47972bf..1b962da8d02b 100644
--- a/lldb/tools/debugserver/source/MacOSX/MachProcess.mm
+++ b/lldb/tools/debugserver/source/MacOSX/MachProcess.mm
@@ -140,8 +140,10 @@ static bool CallBoardSystemServiceOpenApplication(NSString 
*bundleIDNSStr,
 cstr = "";
 
   NSString *description = [options description];
-  DNBLog("About to launch process for bundle ID: %s - options:\n%s", cstr,
-[description UTF8String]);
+  DNBLog("[LaunchAttach] START (%d) templated *Board launcher: app lunch "
+ "request for "
+ "'%s' - options:\n%s",
+ getpid(), cstr, [description UTF8String]);
   [system_service
   openApplication:bundleIDNSStr
   options:options
@@ -156,33 +158,36 @@ static bool 
CallBoardSystemServiceOpenApplication(NSString *bundleIDNSStr,
if (wants_pid) {
  pid_in_block =
  [system_service pidForApplication:bundleIDNSStr];
- DNBLog(
- "In completion handler, got pid for bundle id, pid: %d.",
- pid_in_block);
- DNBLogThreadedIf(
- LOG_PROCESS,
- "In completion handler, got pid for bundle id, pid: %d.",
- pid_in_block);
-   } else
- DNBLogThreadedIf(LOG_PROCESS,
-  "In completion handler: success.");
+ DNBLog("[LaunchAttach] In completion handler, got pid for "
+"bundle id "
+"'%s', pid: %d.",
+cstr, pid_in_block);
+   } else {
+ DNBLog("[LaunchAttach] In completion handler, launch was "
+"successful, "
+"debugserver did not ask for the pid");
+   }
  } else {
const char *error_str =
[(NSString *)[bks_error localizedDescription] UTF8String];
if (error_str) {
  open_app_error_string = error_str;
- DNBLogError("In app launch attempt, got error "
- "localizedDescription '%s'.", error_str);
+ DNBLogError(
+ "[LaunchAttach] END (%d) In app launch attempt, got error 
"
+ "localizedDescription '%s'.",
+ getpid(), error_str);
  const char *obj_desc = 
   [NSString stringWithFormat:@"%@", bks_error].UTF8String;
- DNBLogError("In app launch attempt, got error "
- "NSError object description: '%s'.",
- obj_desc);
+ DNBLogError(
+ "[LaunchAttach] END (%d) In app launch attempt, got error 
"
+ "NSError object description: '%s'.",
+ getpid(), obj_desc);
}
-   DNBLogThreadedIf(LOG_PROCESS, "In completion handler for send "
- "event, got error \"%s\"(%ld).",
+   DNBLogThreadedIf(LOG_PROCESS,
+"In completion handler for send "
+"event, got error \"%s\"(%ld).",
 error_str ? error_str : "",
-open_app_error);
+(long)open_app_error);
  }
 
  [system_service release];
@@ -200,15 +205,23 @@ static bool 
CallBoardSystemServiceOpenApplication(NSString *bundleIDNSStr,
 
   dispatch_release(semaphore);
 
+  DNBLog("[LaunchAttach] END (%d) templated *Board launcher finished app lunch 
"
+ "request for "
+ "'%s'",
+ getpid(), cstr);
+
   if (!success) {
-DNBLogError("timed 

[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux

2021-01-11 Thread walter erquinigo via Phabricator via lldb-commits
wallace added a comment.

I've done a lightweight test and it seems that the BaseThreadPlan is being 
asked for the stop reason when the exec happens, but it holds a reference to 
the thread whose destructor has been called, which causes the crash. On Darwin, 
as Greg said, the BaseThreadPlan is deleted when the thread changes, so this 
doesn't happen.
Later this week I'll spend more time gathering logs and I'll share them here in 
a nice format.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D93874/new/

https://reviews.llvm.org/D93874

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux

2021-01-11 Thread Jim Ingham via Phabricator via lldb-commits
jingham added inline comments.



Comment at: lldb/test/API/functionalities/exec/TestExec.py:93
+# we clear all existing thread plans.
+thread.StepInstruction(False)
+

clayborg wrote:
> On Darwin threads have unique identifiers and the thread ID before exec != 
> thread ID after exec. On linux, we get the same thread ID for the thread that 
> does exec: thread ID before exec == thread ID after exec. 
> 
> So on Darwin when we stop, we will remove all thread plans that are not part 
> of the current thread list and since the globally unique thread ID has 
> changed, it will toss out any thread plans for the old thread. On linux this 
> stays around and since the ID is the same, and since the thread lists gets 
> cleared on exec, we then have a case where the thread plan outlives the 
> thread shared pointers. The thread lists (real and user visible) are cleared 
> in ProcessGDBRemote::SetLastStopPacket() when did_exec == true.
> On Darwin threads have unique identifiers and the thread ID before exec != 
> thread ID after exec. On linux, we get the same thread ID for the thread that 
> does exec: thread ID before exec == thread ID after exec. 
> 
> So on Darwin when we stop, we will remove all thread plans that are not part 
> of the current thread list and since the globally unique thread ID has 
> changed, it will toss out any thread plans for the old thread. On linux this 
> stays around and since the ID is the same, and since the thread lists gets 
> cleared on exec, we then have a case where the thread plan outlives the 
> thread shared pointers. The thread lists (real and user visible) are cleared 
> in ProcessGDBRemote::SetLastStopPacket() when did_exec == true.

The ThreadPlans don't rely on thread shared pointers except as a cache.  They 
work off the ThreadID as the primary bit of data.  They have to do this to 
handle the case where a thread might not be reported on one stop, and then will 
be reported later on, as happens with OS Plugin threads in the xnu kernel.

The thread shared pointer cache variable gets cleared on "WillResume" and then 
when you stop, the first time it needs to get its Thread, it looks up its owner 
thread by ID from the Process ThreadList.  So provided there's SOME thread with 
that ID after the stop, the thread plan really won't know any difference.

But more importantly, there should be no way to ask a ThreadPlan questions 
directly.  You are always asking the Thread something (stop reason or 
ShouldStop or whatever), which then forwards the request to that Thread's 
ThreadPlan stack.  So it shouldn't be possible to get to a ThreadPlan whose 
thread no longer exists.  I'd like to know how that's happening, because that's 
going to be a wrong thing to do in any circumstances...


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D93874/new/

https://reviews.llvm.org/D93874

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux

2021-01-11 Thread Greg Clayton via Phabricator via lldb-commits
clayborg added inline comments.



Comment at: lldb/test/API/functionalities/exec/TestExec.py:93
+# we clear all existing thread plans.
+thread.StepInstruction(False)
+

On Darwin threads have unique identifiers and the thread ID before exec != 
thread ID after exec. On linux, we get the same thread ID for the thread that 
does exec: thread ID before exec == thread ID after exec. 

So on Darwin when we stop, we will remove all thread plans that are not part of 
the current thread list and since the globally unique thread ID has changed, it 
will toss out any thread plans for the old thread. On linux this stays around 
and since the ID is the same, and since the thread lists gets cleared on exec, 
we then have a case where the thread plan outlives the thread shared pointers. 
The thread lists (real and user visible) are cleared in 
ProcessGDBRemote::SetLastStopPacket() when did_exec == true.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D93874/new/

https://reviews.llvm.org/D93874

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux

2021-01-11 Thread Jim Ingham via Phabricator via lldb-commits
jingham added inline comments.



Comment at: lldb/test/API/functionalities/exec/TestExec.py:91-93
+# Single step to create a thread plan. We have to make sure that after 
exec
+# we clear all existing thread plans.
+thread.StepInstruction(False)

wallace wrote:
> labath wrote:
> > A separate test case would be better, as (even with the comment) a random 
> > future modification of the test could negate this.
> Sure! I'll wait for @jingham to chime first
> Sure! I'll wait for @jingham to chime first

I'm a little confused about this crash.  

Before lldb lets the target proceed, it copies the thread list into a 
temporary list.  Whenever you stop in the debugger, the first 
thing that happens is ProcessGDBRemote gets the ThreadID's from the stub and 
figures out which of those thread IDs still exist after the stop.  If the 
process plugin sees a ThreadID that it also saw before the stop, then it should 
copy the ThreadSP of the thread over from the "seen at previous stop" list to 
the current thread list.  At that point the Thread should be bona fide

Then all the ShouldStop type questions iterate over the current thread list, so 
if somebody asks the plans of the exec Thread from above which it found in the 
current thread list, ThreadPlan::GetThread will look it up by ID in the current 
thread list, and find and return that valid thread.

OTOH, if the thread ID didn't still exist, the process plugin should discard it 
as part of processing the stop, and nobody should be asking its thread plans 
questions since again you should go through the current thread list and it 
won't be on that list.  

I don't see under what circumstances you are getting a query to the thread plan 
for a bogus thread in this instance.

So I'm a little worried that just throwing away the threads when we know we've 
exec'ed is papering over some flaw lower down in the stack.  Can you give some 
more details on how exactly this is crashing, I don't have a Linux machine 
handy to try stuff out on?

Note, I'm also not at all clear what the StepInstruction in the test is for.  
At that point in the test, the target will have stopped at your first 
breakpoint in main.cpp, which is fair ways (more than one instruction) before 
the exec in main.cpp.

When you stopped before the step, the thread you stopped on will have one 
ThreadPlan on its thread plan stack: the ThreadPlanBase.  That's the plan which 
handles all the "unexpected" stops - ones that aren't governed by a thread plan 
which is actually driving the thread.  For instance, that handles hitting an 
unrelated breakpoint while doing a step-over, etc.  It is always present, and 
can't be popped.  

Doing a thread.StepInstruction at that point pushes the "StepOverInstruction" 
thread plan, runs the process till the StepOverInstruction thread plan says it 
is done, and then pops that thread plan.  So when you stop after the 
instruction step, the thread plan stack for this thread should be the same as 
it was before the instruction step.

If this step-i is necessary to trigger the bug, then again I'm worried we're 
working around some logic goof rather than finding and fixing the actual 
problem.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D93874/new/

https://reviews.llvm.org/D93874

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux

2021-01-11 Thread Greg Clayton via Phabricator via lldb-commits
clayborg added a comment.

I still think we should add a Process::ProcessDidExec() as mentioned in the 
inline comments.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D93874/new/

https://reviews.llvm.org/D93874

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


[Lldb-commits] [PATCH] D93951: [vscode] Improve runInTerminal and support linux

2021-01-11 Thread Greg Clayton via Phabricator via lldb-commits
clayborg accepted this revision.
clayborg added a comment.
This revision is now accepted and ready to land.

Just one rename of an instance variable and this is good to go! Much cleaner 
without the directory and multiple files.




Comment at: lldb/tools/lldb-vscode/FifoFiles.h:26
+
+  std::string path;
+};

rename to "m_path" since this is an instance variable.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D93951/new/

https://reviews.llvm.org/D93951

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


[Lldb-commits] [PATCH] D78978: [LLDB] Add support for WebAssembly debugging

2021-01-11 Thread Adrian Prantl via Phabricator via lldb-commits
aprantl added a comment.

Thanks for the explanation!


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D78978/new/

https://reviews.llvm.org/D78978

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


[Lldb-commits] [PATCH] D78978: [LLDB] Add support for WebAssembly debugging

2021-01-11 Thread Paolo Severini via Phabricator via lldb-commits
paolosev added a comment.

In D78978#2488134 , @aprantl wrote:

> What's the testing story for WASM going to be?

Thanks for the feedback @aprantl!
This patch was also created to show a possible alternative to 
https://reviews.llvm.org/D78801, but D78801  
has evolved since then and I should probably close/abandon this one.

I had not looked at the testing story for Wasm yet because it seemed that the 
changes to the core code could have been a showstopper, but we would need to 
test a process connected to LLDB through a GDBRemote connection.
I see that this is normally done using the //lldbsuite// package, 
//GdbRemoteTestCaseBase//, which actually launches the debuggee process.
We cannot do the same for Wasm, we cannot run a JS engine that implements a GDB 
Stub here. We should instead write code similar to GdbRemoteTestCaseBase that 
simulates a Wasm process, implementing a GDB stub, handling GDBRemote messages 
(both generic and Wasm-specific).
(This is the opposite of the work done to test the implementation of the GDB 
Stub in V8: there we had the process and we had to simulate the debugger).

Furthermore, we could also extend:

- test\Shell\Reproducer\TestGDBRemoteRepro.test
- test\API\functionalities\gdb_remote_client\TestGDBRemoteClient.py

with other Wasm-specific tests.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D78978/new/

https://reviews.llvm.org/D78978

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


Re: [Lldb-commits] [PATCH] D94063: lldb: Add support for DW_AT_ranges on DW_TAG_subprograms

2021-01-11 Thread Jim Ingham via lldb-commits


> On Jan 10, 2021, at 5:37 PM, David Blaikie  wrote:
> 
> Thanks for all the context - so sounds like mostly based on (3) the 
> recommendation would be for this to be an API test (is there a way to test 
> the line table directly? good place for reference on the SB API options - I 
> looked at a few tests and they seemed quite different ( 
> lldb/test/API/functionalities/breakpoint/move_nearest/TestMoveNearest.py and 
> lldb/test/API/commands/breakpoint/set/func-regex/TestBreakpointRegexError.py 
> ) in the way they're written, so not sure what the norms are/how they work).
> 

You can get directly at the line table from the SB API's.  There's an example 
of doing just that on the SBCompileUnit page:

https://lldb.llvm.org/python_reference/lldb.SBCompileUnit-class.html

There's a small sample test in API/sample_test which is a good place to start.  
That one assumes you are running the target, however, which you may or may not 
need to do.  We've accumulated lots of convenience methods over time to make 
testing easier, but we haven't back ported them to old tests.  If you want 
other models than the sample_test, look for ones made more recently.

There's some description of the API tests here:

https://lldb.llvm.org/resources/test.html

and some test writing info in the sources in:

llvm-project/lldb/docs/testsuite/best-practices.txt


> But more fundamentally, seems all the API tests are "unsupported" on my 
> system, and I can't seem to figure out what makes them unsupported according 
> to lit. Any ideas?

I don't know much about how the lit runner works, somebody else will have to 
answer that.

Jim

> 
> On Thu, Jan 7, 2021 at 4:55 PM Jim Ingham  wrote:
> 
> 
> > On Jan 7, 2021, at 3:57 PM, David Blaikie  wrote:
> > 
> > On Thu, Jan 7, 2021 at 3:37 PM Jim Ingham via lldb-commits
> >  wrote:
> >> 
> >> 
> >> 
> >>> On Jan 7, 2021, at 2:29 PM, David Blaikie via Phabricator via 
> >>> lldb-commits  wrote:
> >>> 
> >>> dblaikie added a comment.
> >>> 
> >>> In D94063#2485271 , @labath 
> >>> wrote:
> >>> 
>  In D94063#2483546 , @dblaikie 
>  wrote:
>  
> > If it's better to write it using C++ source and custom clang flags I 
> > can do that instead (it'll be an -mllvm flag - looks like there's one 
> > other test that does that: 
> > `lldb/test/API/lang/objc/forward-decl/TestForwardDecl.py:
> > dict(CFLAGS_EXTRAS="-dwarf-version=5 -mllvm -accel-tables=Dwarf"))`) - 
> > means the test will be a bit more convoluted to tickle the subprogram 
> > ranges, but not much - just takes two functions+function-sections.
>  
>  I certainly wouldn't want to drop the existing test.
> >>> 
> >>> Ah, what's the tradeoff between the different test types here?
> >> 
> >> This is my take (this has been a contentious issue so I'm sure there are 
> >> other takes...):
> >> 
> >> The "Shell" tests use pattern matching against the lldb command line 
> >> output.  They are useful for testing the details of the command 
> >> interaction. You can also do that using pexpect in the API tests, but the 
> >> Python 2.7 version of pexpect seemed really flakey so we switched to shell 
> >> tests for this sort of thing.
> >> 
> >> Because you are matching against text output that isn't API, they are less 
> >> stable.  For instance if we changed anything in the "break set" output, 
> >> your test would fail(*).  And because you are picking details out of that 
> >> text, the tests are less precise.  You either have to match more of the 
> >> command line than you are actually testing for, which isn't a good 
> >> practice, or you run the risk of finding the text you were looking for in 
> >> a directory path or other unrelated part of the output.  Also they are 
> >> harder to debug if you can't reproduce the failure locally, since it isn't 
> >> easy to add internal checks/output to the test to try hypotheses.  
> >> Whenever I have run into failures of this sort the first thing I do is 
> >> convert the test to an API test...
> >> 
> >> But the main benefit of the "Shell" tests is that you can write tests 
> >> without having to know Python or learn the lldb Python API's.  And if you 
> >> are coming from clang you already know how FileCheck tests work, so that's 
> >> a bonus.  I think it's legit to require that folks actually working on 
> >> lldb learn the SB API's.  But we were persuaded that it wasn't fair to 
> >> impose that on people not working on lldb, and yet such folks do sometimes 
> >> need to write tests for lldb...  So for simple tests, the Shell tests are 
> >> an okay option.  But really, there's nothing you can do in a Shell test 
> >> that you can't do in an API test.
> >> 
> >> The "API" tests use the Python SB API's - though they also have the 
> >> ability to run commands and do expect type checks on the output so for 
> >> single commands they work much as the shell 

[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux

2021-01-11 Thread walter erquinigo via Phabricator via lldb-commits
wallace added a comment.

@jingham, friendly ping :)


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D93874/new/

https://reviews.llvm.org/D93874

___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits


[Lldb-commits] [lldb] d36e879 - [lldb] Disable PipeTest.OpenAsReader on windows

2021-01-11 Thread Pavel Labath via lldb-commits

Author: Pavel Labath
Date: 2021-01-11T13:37:49+01:00
New Revision: d36e879c21c9620c9b6a1a8f45afe46379142d2f

URL: 
https://github.com/llvm/llvm-project/commit/d36e879c21c9620c9b6a1a8f45afe46379142d2f
DIFF: 
https://github.com/llvm/llvm-project/commit/d36e879c21c9620c9b6a1a8f45afe46379142d2f.diff

LOG: [lldb] Disable PipeTest.OpenAsReader on windows

This test seems to be broken there (which is not totally surprising as
this functionality was never used on windows). Disable the test while I
investigate.

Added: 


Modified: 
lldb/unittests/Host/PipeTest.cpp

Removed: 




diff  --git a/lldb/unittests/Host/PipeTest.cpp 
b/lldb/unittests/Host/PipeTest.cpp
index e8d2c49c4490..35a44ccf0373 100644
--- a/lldb/unittests/Host/PipeTest.cpp
+++ b/lldb/unittests/Host/PipeTest.cpp
@@ -29,6 +29,8 @@ TEST_F(PipeTest, CreateWithUniqueName) {
 llvm::Succeeded());
 }
 
+// Test broken
+#ifndef _WIN32
 TEST_F(PipeTest, OpenAsReader) {
   Pipe pipe;
   llvm::SmallString<0> name;
@@ -46,3 +48,4 @@ TEST_F(PipeTest, OpenAsReader) {
   pipe.OpenAsReader(name_ref, /*child_process_inherit=*/false).ToError(),
   llvm::Succeeded());
 }
+#endif



___
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits