And here's a rewrite of the coredump bits that does the sparse amap
handling on all amaps that aren't shared at the time of the coredump, so
it'll get consistent data and not panic the kernel. Doing this without
locking the shared amaps while doing the walks or writing to disk requires
redoing the API between UVM and the ELF bits, but nothing horrendous IMO.
Tested on macppc, including verification that whether a process is sharing
maps changes the breakdown into the core file.
Philip
Index: uvm/uvm_extern.h
===================================================================
RCS file: /data/src/openbsd/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.139
diff -u -p -r1.139 uvm_extern.h
--- uvm/uvm_extern.h 5 Jun 2016 08:35:57 -0000 1.139
+++ uvm/uvm_extern.h 1 Feb 2017 07:20:06 -0000
@@ -243,16 +243,16 @@ extern struct pool *uvm_aiobuf_pool;
* used to keep state while iterating over the map for a core dump.
*/
struct uvm_coredump_state {
- void *cookie; /* opaque for the caller */
- vaddr_t start; /* start of region */
- vaddr_t realend; /* real end of region */
- vaddr_t end; /* virtual end of region */
- vm_prot_t prot; /* protection of region */
- int flags; /* flags; see below */
+ struct proc *us_p; /* the thread doing the dumping */
+ void *us_iocookie; /* opaque for the caller */
+ void *us_cookie; /* opaque for the caller */
+ vaddr_t us_start; /* start of region */
+ vaddr_t us_realend; /* real end of region (<= end) */
+ vaddr_t us_end; /* virtual end of region */
+ vm_prot_t us_prot; /* protection of region */
+ int us_nsegment; /* index of current segment */
};
-#define UVM_COREDUMP_STACK 0x01 /* region is user stack */
-
/*
* the various kernel maps, owned by MD code
*/
@@ -458,9 +458,10 @@ int uvm_pglistalloc(psize_t,
paddr_t,
void uvm_pglistfree(struct pglist *);
void uvm_pmr_use_inc(paddr_t, paddr_t);
void uvm_swap_init(void);
-int uvm_coredump_walkmap(struct proc *,
- void *, int (*)(struct proc *, void *,
- struct uvm_coredump_state *), void *);
+int uvm_coredump_walkmap(struct proc *_p, void *_iocookie,
+ int (*_setup)(const struct uvm_coredump_state *),
+ int (*_walk)(const struct uvm_coredump_state *),
+ void *_cookie);
void uvm_grow(struct proc *, vaddr_t);
void uvm_deallocate(vm_map_t, vaddr_t, vsize_t);
struct uvm_object *uvn_attach(struct vnode *, vm_prot_t);
Index: uvm/uvm_unix.c
===================================================================
RCS file: /data/src/openbsd/src/sys/uvm/uvm_unix.c,v
retrieving revision 1.60
diff -u -p -r1.60 uvm_unix.c
--- uvm/uvm_unix.c 16 Sep 2016 01:09:53 -0000 1.60
+++ uvm/uvm_unix.c 1 Feb 2017 09:02:41 -0000
@@ -134,81 +134,225 @@ uvm_grow(struct proc *p, vaddr_t sp)
#ifndef SMALL_KERNEL
+#define WALK_CHUNK 32
+int
+uvm_coredump_walk_amap(struct vm_map_entry *entry,
+ int (*walk)(const struct uvm_coredump_state *),
+ struct uvm_coredump_state *state)
+{
+ struct vm_anon *anons[WALK_CHUNK];
+ vaddr_t start, end;
+ int absent = 0;
+ int npages, i, error;
+
+ state->us_start = start = entry->start;
+ end = MIN(entry->end, VM_MAXUSER_ADDRESS);
+
+ for (; start < end; start += npages << PAGE_SHIFT) {
+ npages = (end - start) >> PAGE_SHIFT;
+ if (npages > WALK_CHUNK)
+ npages = WALK_CHUNK;
+ amap_lookups(&entry->aref, start - entry->start, anons, npages);
+ for (i = 0; i < npages; i++) {
+ if ((anons[i] == NULL) == absent)
+ continue;
+ if (!absent) {
+ /* going from present -> absent: set realend */
+ state->us_realend = start + (i << PAGE_SHIFT);
+ absent = 1;
+ continue;
+ }
+
+ /* going from absent to present: invoke callback */
+ state->us_end = start + (i << PAGE_SHIFT);
+ if (state->us_start != state->us_end) {
+ error = (*walk)(state);
+ if (error)
+ return error;
+ state->us_nsegment++;
+ }
+ state->us_start = state->us_realend = state->us_end;
+ absent = 0;
+ }
+ }
+
+ if (!absent)
+ state->us_realend = end;
+ state->us_end = end;
+ error = (*walk)(state);
+ state->us_nsegment++;
+ return error;
+}
+
/*
- * Walk the VA space for a process, invoking 'func' on each present range
- * that should be included in a coredump.
+ * Common logic for whether a map entry should be included in a coredump
*/
+static inline int
+uvm_should_coredump(struct proc *p, struct vm_map_entry *entry)
+{
+ if (!(entry->protection & PROT_WRITE) &&
+ entry->aref.ar_amap == NULL &&
+ entry->start != p->p_p->ps_sigcode)
+ return 0;
+
+ /*
+ * Skip ranges marked as unreadable, as uiomove(UIO_USERSPACE)
+ * will fail on them. Maybe this really should be a test of
+ * entry->max_protection, but doing
+ * uvm_map_extract(UVM_EXTRACT_FIXPROT)
+ * on each such page would suck.
+ */
+ if ((entry->protection & PROT_READ) == 0)
+ return 0;
+
+ /* Don't dump mmaped devices. */
+ if (entry->object.uvm_obj != NULL &&
+ UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
+ return 0;
+
+ if (entry->start >= VM_MAXUSER_ADDRESS)
+ return 0;
+
+ return 1;
+}
+
+
+/* do nothing callback for uvm_coredump_walk_amap() */
+static int
+noop(const struct uvm_coredump_state *state)
+{
+ return 0;
+}
+
+/*
+ * Walk the VA space for a process twice for ranges that should be
+ * included in a coredump: the first pass counts the present ranges,
+ * then the 'setup' callback is invoked once between passes, and the
+ * 'walk' callback is invoked on each present range in the second
+ * pass.  Both passes are guaranteed to see the same ranges.
+ */
+
+int sparse = 1;
+
int
uvm_coredump_walkmap(struct proc *p, void *iocookie,
- int (*func)(struct proc *, void *, struct uvm_coredump_state *),
- void *cookie)
+ int (*setup)(const struct uvm_coredump_state *),
+ int (*walk)(const struct uvm_coredump_state *), void *cookie)
{
struct uvm_coredump_state state;
struct vmspace *vm = p->p_vmspace;
struct vm_map *map = &vm->vm_map;
struct vm_map_entry *entry;
- vaddr_t top;
- int error;
+ int refed_amaps = 0;
+ int error = 0;
+ state.us_p = p;
+ state.us_iocookie = iocookie;
+ state.us_cookie = cookie;
+
+ /*
+ * Walk the map once to count the segments. If an amap is
+	 * referenced more than once then take *another* reference
+ * and treat the amap as exactly one segment instead of
+ * checking page presence inside it. On the second pass
+ * we'll recognize which amaps we did that for by the ref
+ * count being >1...and decrement it then.
+ */
+ state.us_nsegment = 0;
RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
- state.cookie = cookie;
- state.prot = entry->protection;
- state.flags = 0;
-
/* should never happen for a user process */
if (UVM_ET_ISSUBMAP(entry)) {
panic("%s: user process with submap?", __func__);
}
- if (!(entry->protection & PROT_WRITE) &&
- entry->aref.ar_amap == NULL &&
- entry->start != p->p_p->ps_sigcode)
+ if (! uvm_should_coredump(p, entry))
continue;
- /* Don't dump mmaped devices. */
- if (entry->object.uvm_obj != NULL &&
- UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
+ if (sparse && entry->aref.ar_amap != NULL) {
+ if (entry->aref.ar_amap->am_ref == 1) {
+ uvm_coredump_walk_amap(entry, &noop, &state);
+ continue;
+ }
+
+ /*
+ * Multiple refs currently, so take another and
+ * treat it as a single segment
+ */
+ entry->aref.ar_amap->am_ref++;
+ refed_amaps++;
+ }
+
+ state.us_nsegment++;
+ }
+
+ /*
+ * Okay, we have a count in state.us_nsegment. Prepare to
+ * walk it again, then invoke the setup callback.
+ */
+ entry = RBT_MIN(uvm_map_addr, &map->addr);
+ error = (*setup)(&state);
+ if (error)
+ goto cleanup;
+
+ /*
+ * Setup went okay, so do the second walk, invoking the walk
+ * callback on the counted segments and cleaning up references
+ * as we go.
+ */
+ state.us_nsegment = 0;
+ for (; entry != NULL; entry = RBT_NEXT(uvm_map_addr, entry)) {
+ if (! uvm_should_coredump(p, entry))
continue;
- state.start = entry->start;
- state.realend = entry->end;
- state.end = entry->end;
+ state.us_prot = entry->protection;
- if (state.start >= VM_MAXUSER_ADDRESS)
+ if (sparse && entry->aref.ar_amap != NULL &&
+ entry->aref.ar_amap->am_ref == 1) {
+ error = uvm_coredump_walk_amap(entry, walk, &state);
+ if (error)
+ break;
continue;
+ }
- if (state.end > VM_MAXUSER_ADDRESS)
- state.end = VM_MAXUSER_ADDRESS;
+ state.us_start = entry->start;
+ state.us_realend = entry->end;
+ state.us_end = entry->end;
-#ifdef MACHINE_STACK_GROWS_UP
- if ((vaddr_t)vm->vm_maxsaddr <= state.start &&
- state.start < ((vaddr_t)vm->vm_maxsaddr + MAXSSIZ)) {
- top = round_page((vaddr_t)vm->vm_maxsaddr +
- ptoa(vm->vm_ssize));
- if (state.end > top)
- state.end = top;
+ if (state.us_end > VM_MAXUSER_ADDRESS)
+ state.us_end = VM_MAXUSER_ADDRESS;
- if (state.start >= state.end)
- continue;
-#else
- if (state.start >= (vaddr_t)vm->vm_maxsaddr) {
- top = trunc_page((vaddr_t)vm->vm_minsaddr -
- ptoa(vm->vm_ssize));
- if (state.start < top)
- state.start = top;
+ error = (*walk)(&state);
+ if (error)
+ break;
+ state.us_nsegment++;
- if (state.start >= state.end)
- continue;
-#endif
- state.flags |= UVM_COREDUMP_STACK;
+ if (sparse && entry->aref.ar_amap != NULL &&
+ entry->aref.ar_amap->am_ref > 1) {
+ /* multiple refs, so we need to drop one */
+ entry->aref.ar_amap->am_ref--;
+ refed_amaps--;
}
+ }
- error = (*func)(p, iocookie, &state);
- if (error)
- return (error);
+ if (error) {
+cleanup:
+ /* clean up the extra references from where we left off */
+ if (refed_amaps > 0) {
+ for (; entry != NULL;
+ entry = RBT_NEXT(uvm_map_addr, entry)) {
+ if (entry->aref.ar_amap == NULL ||
+ entry->aref.ar_amap->am_ref == 1)
+ continue;
+ if (! uvm_should_coredump(p, entry))
+ continue;
+ entry->aref.ar_amap->am_ref--;
+ if (refed_amaps-- == 0)
+ break;
+ }
+ }
}
- return (0);
+ return (error);
}
#endif /* !SMALL_KERNEL */
Index: kern/exec_elf.c
===================================================================
RCS file: /data/src/openbsd/src/sys/kern/exec_elf.c,v
retrieving revision 1.130
diff -u -p -r1.130 exec_elf.c
--- kern/exec_elf.c 21 Jan 2017 05:42:03 -0000 1.130
+++ kern/exec_elf.c 1 Feb 2017 07:54:14 -0000
@@ -950,21 +950,28 @@ out1:
return error;
}
-struct countsegs_state {
- int npsections;
-};
+#ifdef SMALL_KERNEL
+
+int
+ELFNAMEEND(coredump)(struct proc *p, void *cookie)
+{
+ return EPERM;
+}
-int ELFNAMEEND(coredump_countsegs)(struct proc *, void *,
- struct uvm_coredump_state *);
+#else
struct writesegs_state {
- Elf_Phdr *psections;
+ off_t notestart;
+ off_t secstart;
off_t secoff;
+ Elf_Phdr *psections;
+ size_t psectionslen;
+ size_t notesize;
+ int npsections;
};
-int ELFNAMEEND(coredump_writeseghdrs)(struct proc *, void *,
- struct uvm_coredump_state *);
-
+int ELFNAMEEND(coredump_setup)(const struct uvm_coredump_state *state);
+int ELFNAMEEND(coredump_walk)(const struct uvm_coredump_state *state);
int ELFNAMEEND(coredump_notes)(struct proc *, void *, size_t *);
int ELFNAMEEND(coredump_note)(struct proc *, void *, size_t *);
int ELFNAMEEND(coredump_writenote)(struct proc *, void *, Elf_Note *,
@@ -976,194 +983,175 @@ int ELFNAMEEND(coredump_writenote)(struc
int
ELFNAMEEND(coredump)(struct proc *p, void *cookie)
{
-#ifdef SMALL_KERNEL
- return EPERM;
-#else
- Elf_Ehdr ehdr;
- Elf_Phdr *psections = NULL;
- struct countsegs_state cs;
+#ifdef DIAGNOSTIC
+ off_t offset;
+#endif
struct writesegs_state ws;
- off_t notestart, secstart, offset;
- size_t notesize, psectionslen;
+ size_t notesize;
int error, i;
+ ws.psections = NULL;
+
/*
- * We have to make a total of 3 passes across the map:
- *
- * 1. Count the number of map entries (the number of
- * PT_LOAD sections).
- *
- * 2. Write the P-section headers.
- *
- * 3. Write the P-sections.
+ * Walk the map to get all the segment offsets and lengths,
+ * write out the ELF header.
*/
-
- /* Pass 1: count the entries. */
- cs.npsections = 0;
- error = uvm_coredump_walkmap(p, NULL,
- ELFNAMEEND(coredump_countsegs), &cs);
- if (error)
- goto out;
-
- /* Count the PT_NOTE section. */
- cs.npsections++;
-
- /* Get the size of the notes. */
-	error = ELFNAMEEND(coredump_notes)(p, NULL, &notesize);
- if (error)
- goto out;
-
- memset(&ehdr, 0, sizeof(ehdr));
- memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
- ehdr.e_ident[EI_CLASS] = ELF_TARG_CLASS;
- ehdr.e_ident[EI_DATA] = ELF_TARG_DATA;
- ehdr.e_ident[EI_VERSION] = EV_CURRENT;
- /* XXX Should be the OSABI/ABI version of the executable. */
- ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV;
- ehdr.e_ident[EI_ABIVERSION] = 0;
- ehdr.e_type = ET_CORE;
- /* XXX This should be the e_machine of the executable. */
- ehdr.e_machine = ELF_TARG_MACH;
- ehdr.e_version = EV_CURRENT;
- ehdr.e_entry = 0;
- ehdr.e_phoff = sizeof(ehdr);
- ehdr.e_shoff = 0;
- ehdr.e_flags = 0;
- ehdr.e_ehsize = sizeof(ehdr);
- ehdr.e_phentsize = sizeof(Elf_Phdr);
- ehdr.e_phnum = cs.npsections;
- ehdr.e_shentsize = 0;
- ehdr.e_shnum = 0;
- ehdr.e_shstrndx = 0;
-
- /* Write out the ELF header. */
- error = coredump_write(cookie, UIO_SYSSPACE, &ehdr, sizeof(ehdr));
- if (error)
- goto out;
-
- psections = mallocarray(cs.npsections, sizeof(Elf_Phdr),
- M_TEMP, M_WAITOK|M_ZERO);
- psectionslen = cs.npsections * sizeof(Elf_Phdr);
-
- offset = sizeof(ehdr);
- notestart = offset + psectionslen;
- secstart = notestart + notesize;
-
- /* Pass 2: now write the P-section headers. */
- ws.secoff = secstart;
- ws.psections = psections;
- error = uvm_coredump_walkmap(p, cookie,
- ELFNAMEEND(coredump_writeseghdrs), &ws);
+ error = uvm_coredump_walkmap(p, cookie, ELFNAMEEND(coredump_setup),
+ ELFNAMEEND(coredump_walk), &ws);
if (error)
goto out;
- /* Write out the PT_NOTE header. */
- ws.psections->p_type = PT_NOTE;
- ws.psections->p_offset = notestart;
- ws.psections->p_vaddr = 0;
- ws.psections->p_paddr = 0;
- ws.psections->p_filesz = notesize;
- ws.psections->p_memsz = 0;
- ws.psections->p_flags = PF_R;
- ws.psections->p_align = ELFROUNDSIZE;
-
- error = coredump_write(cookie, UIO_SYSSPACE, psections, psectionslen);
+ error = coredump_write(cookie, UIO_SYSSPACE, ws.psections,
+ ws.psectionslen);
if (error)
goto out;
-#ifdef DIAGNOSTIC
- offset += psectionslen;
- if (offset != notestart)
- panic("coredump: offset %lld != notestart %lld",
- (long long) offset, (long long) notestart);
-#endif
-
/* Write out the notes. */
 	error = ELFNAMEEND(coredump_notes)(p, cookie, &notesize);
if (error)
goto out;
#ifdef DIAGNOSTIC
- offset += notesize;
- if (offset != secstart)
+ if (notesize != ws.notesize)
+ panic("coredump: notesize changed: %zu != %zu",
+ ws.notesize, notesize);
+ offset = ws.notestart + notesize;
+ if (offset != ws.secstart)
panic("coredump: offset %lld != secstart %lld",
- (long long) offset, (long long) secstart);
+ (long long) offset, (long long) ws.secstart);
#endif
/* Pass 3: finally, write the sections themselves. */
- for (i = 0; i < cs.npsections - 1; i++) {
- if (psections[i].p_filesz == 0)
+ for (i = 0; i < ws.npsections - 1; i++) {
+ Elf_Phdr *pent = &ws.psections[i];
+ if (pent->p_filesz == 0)
continue;
#ifdef DIAGNOSTIC
- if (offset != psections[i].p_offset)
+ if (offset != pent->p_offset)
panic("coredump: offset %lld != p_offset[%d] %lld",
(long long) offset, i,
- (long long) psections[i].p_filesz);
+ (long long) pent->p_filesz);
#endif
error = coredump_write(cookie, UIO_USERSPACE,
- (void *)(vaddr_t)psections[i].p_vaddr,
- psections[i].p_filesz);
+ (void *)(vaddr_t)pent->p_vaddr, pent->p_filesz);
if (error)
goto out;
- coredump_unmap(cookie, (vaddr_t)psections[i].p_vaddr,
- (vaddr_t)psections[i].p_vaddr + psections[i].p_filesz);
+ coredump_unmap(cookie, (vaddr_t)pent->p_vaddr,
+ (vaddr_t)pent->p_vaddr + pent->p_filesz);
#ifdef DIAGNOSTIC
- offset += psections[i].p_filesz;
+ offset += ws.psections[i].p_filesz;
#endif
}
out:
- free(psections, M_TEMP, psectionslen);
+ free(ws.psections, M_TEMP, ws.psectionslen);
return (error);
-#endif
}
+
int
-ELFNAMEEND(coredump_countsegs)(struct proc *p, void *iocookie,
- struct uvm_coredump_state *us)
+ELFNAMEEND(coredump_setup)(const struct uvm_coredump_state *us)
{
-#ifndef SMALL_KERNEL
- struct countsegs_state *cs = us->cookie;
+ Elf_Ehdr ehdr;
+ struct writesegs_state *ws = us->us_cookie;
+ Elf_Phdr *note;
+ int error;
- cs->npsections++;
-#endif
- return (0);
+ /* Get the count of segments, plus one for the PT_NOTE */
+ ws->npsections = us->us_nsegment + 1;
+
+ /* Get the size of the notes. */
+ error = ELFNAMEEND(coredump_notes)(us->us_p, NULL, &ws->notesize);
+ if (error)
+ return error;
+
+ /* Setup the ELF header */
+ memset(&ehdr, 0, sizeof(ehdr));
+ memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
+ ehdr.e_ident[EI_CLASS] = ELF_TARG_CLASS;
+ ehdr.e_ident[EI_DATA] = ELF_TARG_DATA;
+ ehdr.e_ident[EI_VERSION] = EV_CURRENT;
+ /* XXX Should be the OSABI/ABI version of the executable. */
+ ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV;
+ ehdr.e_ident[EI_ABIVERSION] = 0;
+ ehdr.e_type = ET_CORE;
+ /* XXX This should be the e_machine of the executable. */
+ ehdr.e_machine = ELF_TARG_MACH;
+ ehdr.e_version = EV_CURRENT;
+ ehdr.e_entry = 0;
+ ehdr.e_phoff = sizeof(ehdr);
+ ehdr.e_shoff = 0;
+ ehdr.e_flags = 0;
+ ehdr.e_ehsize = sizeof(ehdr);
+ ehdr.e_phentsize = sizeof(Elf_Phdr);
+ ehdr.e_phnum = ws->npsections;
+ ehdr.e_shentsize = 0;
+ ehdr.e_shnum = 0;
+ ehdr.e_shstrndx = 0;
+
+ /* Write out the ELF header. */
+ error = coredump_write(us->us_iocookie, UIO_SYSSPACE,
+ &ehdr, sizeof(ehdr));
+ if (error)
+ return error;
+
+ /*
+ * Allocate the segment header array and setup to collect
+ * the section sizes and offsets
+ */
+ ws->psections = mallocarray(ws->npsections, sizeof(Elf_Phdr),
+ M_TEMP, M_WAITOK|M_ZERO);
+ ws->psectionslen = ws->npsections * sizeof(Elf_Phdr);
+
+ ws->notestart = sizeof(ehdr) + ws->psectionslen;
+ ws->secstart = ws->notestart + ws->notesize;
+ ws->secoff = ws->secstart;
+
+ /* Fill in the PT_NOTE segment header in the last slot */
+ note = &ws->psections[ws->npsections - 1];
+ note->p_type = PT_NOTE;
+ note->p_offset = ws->notestart;
+ note->p_vaddr = 0;
+ note->p_paddr = 0;
+ note->p_filesz = ws->notesize;
+ note->p_memsz = 0;
+ note->p_flags = PF_R;
+ note->p_align = ELFROUNDSIZE;
+
+ return 0;
}
int
-ELFNAMEEND(coredump_writeseghdrs)(struct proc *p, void *iocookie,
- struct uvm_coredump_state *us)
+ELFNAMEEND(coredump_walk)(const struct uvm_coredump_state *us)
{
-#ifndef SMALL_KERNEL
- struct writesegs_state *ws = us->cookie;
+ struct writesegs_state *ws = us->us_cookie;
Elf_Phdr phdr;
vsize_t size, realsize;
- size = us->end - us->start;
- realsize = us->realend - us->start;
+ size = us->us_end - us->us_start;
+ realsize = us->us_realend - us->us_start;
phdr.p_type = PT_LOAD;
phdr.p_offset = ws->secoff;
- phdr.p_vaddr = us->start;
+ phdr.p_vaddr = us->us_start;
phdr.p_paddr = 0;
phdr.p_filesz = realsize;
phdr.p_memsz = size;
phdr.p_flags = 0;
- if (us->prot & PROT_READ)
+ if (us->us_prot & PROT_READ)
phdr.p_flags |= PF_R;
- if (us->prot & PROT_WRITE)
+ if (us->us_prot & PROT_WRITE)
phdr.p_flags |= PF_W;
- if (us->prot & PROT_EXEC)
+ if (us->us_prot & PROT_EXEC)
phdr.p_flags |= PF_X;
phdr.p_align = PAGE_SIZE;
ws->secoff += phdr.p_filesz;
- *ws->psections++ = phdr;
-#endif
+ ws->psections[us->us_nsegment] = phdr;
return (0);
}
@@ -1171,7 +1159,6 @@ ELFNAMEEND(coredump_writeseghdrs)(struct
int
ELFNAMEEND(coredump_notes)(struct proc *p, void *iocookie, size_t *sizep)
{
-#ifndef SMALL_KERNEL
struct ps_strings pss;
struct iovec iov;
struct uio uio;
@@ -1315,14 +1302,12 @@ ELFNAMEEND(coredump_notes)(struct proc *
}
*sizep = size;
-#endif
return (0);
}
int
ELFNAMEEND(coredump_note)(struct proc *p, void *iocookie, size_t *sizep)
{
-#ifndef SMALL_KERNEL
Elf_Note nhdr;
int size, notesize, error;
int namesize;
@@ -1378,7 +1363,6 @@ ELFNAMEEND(coredump_note)(struct proc *p
*sizep = size;
/* XXX Add hook for machdep per-LWP notes. */
-#endif
return (0);
}
@@ -1386,9 +1370,6 @@ int
ELFNAMEEND(coredump_writenote)(struct proc *p, void *cookie, Elf_Note *nhdr,
const char *name, void *data)
{
-#ifdef SMALL_KERNEL
- return EPERM;
-#else
int error;
error = coredump_write(cookie, UIO_SYSSPACE, nhdr, sizeof(*nhdr));
@@ -1401,5 +1382,6 @@ ELFNAMEEND(coredump_writenote)(struct pr
return error;
return coredump_write(cookie, UIO_SYSSPACE, data, nhdr->descsz);
-#endif
}
+
+#endif /* !SMALL_KERNEL */