Source: ffcall
Severity: normal
Tags: patch
User: debian-powe...@lists.debian.org

Dear Maintainer,

The package ffcall fails to build from source on ppc64el because some
definitions were missing for the platform.

I have created a patch that mostly copies the powerpc64 .c and .s files, with
some changes to the *powerpc64le.s files to support ELFv2 (basically replacing
the use of the .opd section with .localentry, since the former is not part of
the ELFv2 ABI).

Just removing the line that uses the .opd section is enough to let the package
build, but I assumed an entry point definition was still needed, so I changed
it according to the ELFv2 ABI specification linked in the ABI Documentation
section at https://wiki.debian.org/ppc64el.
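For reference, here is a minimal sketch of the difference (not taken from
ffcall; the function name my_func and the trivial body are made up for
illustration). Under ELFv1 the global symbol points at a function descriptor
in the .opd section, whereas under ELFv2 there is no .opd and the global entry
computes r2 from r12 before the .localentry marker:

  ELFv1 (big-endian ppc64), descriptor in .opd:

        .section ".opd","aw"
        .align 3
        .globl  my_func
  my_func:
        .quad   .L.my_func, .TOC.@tocbase, 0
        .section ".text"
        .align 2
  .L.my_func:
        li      3, 0            # body: return 0 in r3
        blr

  ELFv2 (ppc64el), dual entry point with .localentry:

        .section ".text"
        .align 2
        .globl  my_func
  my_func:
        addis   %r2, %r12, .TOC.-my_func@ha     # global entry: compute TOC pointer
        addi    %r2, %r2, .TOC.-my_func@l
        .localentry     my_func, .-my_func      # local entry: r2 already valid
        li      3, 0                            # body: return 0 in r3
        blr

In the patch below the same addis/addi + .localentry sequence appears at the
top of __builtin_avcall, __vacall_r and __vacall.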

I also added the corresponding entries to the Makefile.in files. I could have
reused the same .c files that powerpc64 uses, but I preferred to copy them to
avoid any confusion.

The package still lacks dh-autoreconf usage, which would keep the configure
files up to date, but since it builds without it and merely adding it breaks
the build, I preferred to leave that for separate work.

I am not an expert on this subject, so I apologize in advance for any mistakes
I may have made in the patch. Please let me know if you have any comments.


Thanks and regards.
Fernando

-- System Information:
Debian Release: jessie/sid
  APT prefers unstable
  APT policy: (500, 'unstable')
Architecture: ppc64el (ppc64le)

Kernel: Linux 3.16-trunk-powerpc64le (SMP w/32 CPU cores)
Locale: LANG=C, LC_CTYPE=C (charmap=ANSI_X3.4-1968)
Shell: /bin/sh linked to /bin/dash
diff -Nru ffcall-1.10+cvs20100619/debian/changelog ffcall-1.10+cvs20100619/debian/changelog
--- ffcall-1.10+cvs20100619/debian/changelog	2013-12-24 11:56:22.000000000 +0000
+++ ffcall-1.10+cvs20100619/debian/changelog	2014-11-06 01:57:44.000000000 +0000
@@ -1,3 +1,11 @@
+ffcall (1.10+cvs20100619-3ppc64el1) UNRELEASED; urgency=medium
+
+  * Replaced the .opd section, nonexistent in ELFv2, with .localentry for the entry
+    point within the *.s files, which fixes the FTBFS on ppc64el. Additionally made
+    some other modifications to the ppc64 endianness definitions in header files.
+
+ -- Fernando Seiti Furusato <ferse...@br.ibm.com>  Thu, 06 Nov 2014 01:42:08 +0000
+
 ffcall (1.10+cvs20100619-3) unstable; urgency=medium
 
   * Update config.* during build (Closes: #727848)
diff -Nru ffcall-1.10+cvs20100619/debian/patches/ppc64el-elfv2.patch ffcall-1.10+cvs20100619/debian/patches/ppc64el-elfv2.patch
--- ffcall-1.10+cvs20100619/debian/patches/ppc64el-elfv2.patch	1970-01-01 00:00:00.000000000 +0000
+++ ffcall-1.10+cvs20100619/debian/patches/ppc64el-elfv2.patch	2014-11-06 01:57:44.000000000 +0000
@@ -0,0 +1,1023 @@
+Index: ffcall-1.10+cvs20100619/avcall/avcall-powerpc64le.c
+===================================================================
+--- /dev/null
++++ ffcall-1.10+cvs20100619/avcall/avcall-powerpc64le.c
+@@ -0,0 +1,167 @@
++#ifndef _avcall_powerpc64_c				/*-*- C -*-*/
++#define _avcall_powerpc64_c
++/**
++  Copyright 1993 Bill Triggs, <bill.tri...@inrialpes.fr>
++  Copyright 1995-2006 Bruno Haible, <br...@clisp.org>
++  Copyright 2000 Adam Fedor, <fe...@gnu.org>
++  Copyright 2004 Paul Guyot, <pgu...@kallisys.net>
++
++  This is free software distributed under the GNU General Public
++  Licence described in the file COPYING. Contact the author if
++  you don't have this or can't live with it. There is ABSOLUTELY
++  NO WARRANTY, explicit or implied, on this software.
++**/
++/*----------------------------------------------------------------------
++  !!! THIS ROUTINE MUST BE COMPILED gcc -O !!!
++
++  Foreign function interface for a 64-bit PowerPC with gcc
++
++  This calls a C function with an argument list built up using macros
++  defined in avcall.h.
++
++  PowerPC64 Argument Passing Conventions:
++
++  All arguments, except the first 8 words, are passed on the stack with
++  word alignment. The first 13 doubles and floats are also passed in
++  floating-point-registers.
++  To return a structure, the called function copies the value to space
++  pointed to by its first argument, and all other arguments are shifted
++  down by one.
++
++  The AIX argument passing conventions are used:
++  - the first 13 doubles and floats are passed in FP registers,
++    and when they do, there is still room allocated for them in the
++    argument sequence (integer regs or stack).
++  - Structures are passed in the argument sequence. But structures
++    containing floats or doubles are passed in FP registers?!
++
++  Compile this routine with gcc -O (or -O2 -fno-omit-frame-pointer or -g -O)
++  to get the right register variables. For other compilers use the
++  pre-compiled assembler version.
++  ----------------------------------------------------------------------*/
++#include "avcall.h.in"
++
++#define STACK_OFFSET 14
++
++#define RETURN(TYPE,VAL)	(*(TYPE*)l->raddr = (TYPE)(VAL))
++
++register double farg1	__asm__("fr1");
++register double farg2	__asm__("fr2");
++register double farg3	__asm__("fr3");
++register double farg4	__asm__("fr4");
++register double farg5	__asm__("fr5");
++register double farg6	__asm__("fr6");
++register double farg7	__asm__("fr7");
++register double farg8	__asm__("fr8");
++register double farg9	__asm__("fr9");
++register double farg10	__asm__("fr10");
++register double farg11	__asm__("fr11");
++register double farg12	__asm__("fr12");
++register double farg13	__asm__("fr13");
++
++int
++__builtin_avcall(av_alist* l)
++{
++  register __avword*	sp	__asm__("r1");  /* C names for registers */
++/*register __avword	iret	__asm__("r3"); */
++  register __avword	iret2	__asm__("r4");
++  register float	fret	__asm__("fr1");
++  register double	dret	__asm__("fr1");
++
++  __avword space[__AV_ALIST_WORDS];	/* space for callee's stack frame */
++  __avword* argframe = sp + STACK_OFFSET;/* stack offset for argument list */
++  int arglen = l->aptr - l->args;
++  __avword i;
++
++  for (i = 8; i < arglen; i++) /* push function args onto stack */
++    argframe[i-8] = l->args[i];
++
++  /* pass first 13 floating-point args in registers */
++  arglen = l->faptr - l->fargs;
++  if (arglen == 0) goto fargs0;
++  else if (arglen == 1) goto fargs1;
++  else if (arglen == 2) goto fargs2;
++  else if (arglen == 3) goto fargs3;
++  else if (arglen == 4) goto fargs4;
++  else if (arglen == 5) goto fargs5;
++  else if (arglen == 6) goto fargs6;
++  else if (arglen == 7) goto fargs7;
++  else if (arglen == 8) goto fargs8;
++  else if (arglen == 9) goto fargs9;
++  else if (arglen == 10) goto fargs10;
++  else if (arglen == 11) goto fargs11;
++  else if (arglen == 12) goto fargs12;
++  else if (arglen == 13) goto fargs13;
++  fargs13: farg13 = l->fargs[12];
++  fargs12: farg12 = l->fargs[11];
++  fargs11: farg11 = l->fargs[10];
++  fargs10: farg10 = l->fargs[9];
++  fargs9: farg9 = l->fargs[8];
++  fargs8: farg8 = l->fargs[7];
++  fargs7: farg7 = l->fargs[6];
++  fargs6: farg6 = l->fargs[5];
++  fargs5: farg5 = l->fargs[4];
++  fargs4: farg4 = l->fargs[3];
++  fargs3: farg3 = l->fargs[2];
++  fargs2: farg2 = l->fargs[1];
++  fargs1: farg1 = l->fargs[0];
++  fargs0: ;
++				/* call function, pass 8 args in registers */
++  i = (*l->func)(l->args[0], l->args[1], l->args[2], l->args[3],
++		 l->args[4], l->args[5], l->args[6], l->args[7]);
++
++  /* save return value */
++  if (l->rtype == __AVvoid) {
++  } else
++  if (l->rtype == __AVword) {
++    RETURN(__avword, i);
++  } else
++  if (l->rtype == __AVchar) {
++    RETURN(char, i);
++  } else
++  if (l->rtype == __AVschar) {
++    RETURN(signed char, i);
++  } else
++  if (l->rtype == __AVuchar) {
++    RETURN(unsigned char, i);
++  } else
++  if (l->rtype == __AVshort) {
++    RETURN(short, i);
++  } else
++  if (l->rtype == __AVushort) {
++    RETURN(unsigned short, i);
++  } else
++  if (l->rtype == __AVint) {
++    RETURN(int, i);
++  } else
++  if (l->rtype == __AVuint) {
++    RETURN(unsigned int, i);
++  } else
++  if (l->rtype == __AVlong) {
++    RETURN(long, i);
++  } else
++  if (l->rtype == __AVulong) {
++    RETURN(unsigned long, i);
++  } else
++  if (l->rtype == __AVlonglong) {
++    RETURN(long long, i);
++  } else
++  if (l->rtype == __AVulonglong) {
++    RETURN(unsigned long long, i);
++  } else
++  if (l->rtype == __AVfloat) {
++    RETURN(float, fret);
++  } else
++  if (l->rtype == __AVdouble) {
++    RETURN(double, dret);
++  } else
++  if (l->rtype == __AVvoidp) {
++    RETURN(void*, i);
++  } else
++  if (l->rtype == __AVstruct) {
++    /* __AV_PCC_STRUCT_RETURN and __AV_REGISTER_STRUCT_RETURN are ignored */
++  }
++  return 0;
++}
++
++#endif /*_avcall_powerpc64_c */
+Index: ffcall-1.10+cvs20100619/avcall/avcall-powerpc64le.s
+===================================================================
+--- /dev/null
++++ ffcall-1.10+cvs20100619/avcall/avcall-powerpc64le.s
+@@ -0,0 +1,191 @@
++	.file	"avcall-powerpc64le.c"
++	.section	".text"
++	.align 2
++	.globl __builtin_avcall
++__builtin_avcall:
++	addis	%r2, %r12, .TOC.-__builtin_avcall@ha
++	addi	%r2, %r2, .TOC.-__builtin_avcall@l
++	.localentry	__builtin_avcall, .-__builtin_avcall
++	.globl	.__builtin_avcall
++.__builtin_avcall:
++	mflr 0
++	std 31,-8(1)
++	mr 31,3
++	std 29,-24(1)
++	std 0,16(1)
++	stdu 1,-2192(1)
++	ld 9,40(3)
++	li 3,8
++	addi 0,1,112
++	subf 9,31,9
++	addi 9,9,-48
++	sradi 9,9,3
++	extsw 9,9
++	cmpd 7,3,9
++	bge- 7,.L82
++	addi 9,9,-8
++	mtctr 9
++.L83:
++	sldi 9,3,3
++	addi 3,3,1
++	add 11,9,31
++	add 9,9,0
++	lfd 0,48(11)
++	stfd 0,-64(9)
++	bdnz .L83
++.L82:
++	ld 9,2096(31)
++	subf 9,31,9
++	addi 9,9,-2104
++	sradi 9,9,3
++	extsw 9,9
++	cmpwi 7,9,0
++	beq- 7,.L8
++	cmpwi 7,9,1
++	beq- 7,.L11
++	cmpwi 7,9,2
++	beq- 7,.L14
++	cmpwi 7,9,3
++	beq- 7,.L17
++	cmpwi 7,9,4
++	beq- 7,.L20
++	cmpwi 7,9,5
++	beq- 7,.L23
++	cmpwi 7,9,6
++	beq- 7,.L26
++	cmpwi 7,9,7
++	beq- 7,.L29
++	cmpwi 7,9,8
++	beq- 7,.L32
++	cmpwi 7,9,9
++	beq- 7,.L35
++	cmpwi 7,9,10
++	beq- 7,.L38
++	cmpwi 7,9,11
++	beq- 7,.L41
++	cmpwi 7,9,12
++	beq- 7,.L44
++.L47:
++	lfd 13,2200(31)
++.L44:
++	lfd 12,2192(31)
++.L41:
++	lfd 11,2184(31)
++.L38:
++	lfd 10,2176(31)
++.L35:
++	lfd 9,2168(31)
++.L32:
++	lfd 8,2160(31)
++.L29:
++	lfd 7,2152(31)
++.L26:
++	lfd 6,2144(31)
++.L23:
++	lfd 5,2136(31)
++.L20:
++	lfd 4,2128(31)
++.L17:
++	lfd 3,2120(31)
++.L14:
++	lfd 2,2112(31)
++.L11:
++	lfd 1,2104(31)
++.L8:
++	ld 29,0(31)
++	ld 9,96(31)
++	ld 0,0(29)
++	ld 3,48(31)
++	ld 4,56(31)
++	mtctr 0
++	ld 5,64(31)
++	ld 6,72(31)
++	ld 7,80(31)
++	ld 8,88(31)
++	ld 10,104(31)
++	std 2,40(1)
++	ld 11,16(29)
++	ld 2,8(29)
++	bctrl
++	ld 2,40(1)
++	lwz 9,24(31)
++	cmpwi 7,9,1
++	beq- 7,.L49
++	cmpwi 7,9,0
++	beq- 7,.L84
++	cmpwi 7,9,2
++	beq- 7,.L87
++	cmpwi 7,9,3
++	beq- 7,.L87
++	cmpwi 7,9,4
++	beq- 7,.L87
++	cmpwi 7,9,5
++	beq- 7,.L86
++	cmpwi 7,9,6
++	beq- 7,.L86
++	cmpwi 7,9,7
++	beq- 7,.L85
++	cmpwi 7,9,8
++	beq- 7,.L85
++	cmpwi 7,9,9
++	beq- 7,.L84
++	cmpwi 7,9,10
++	beq- 7,.L84
++	cmpwi 7,9,11
++	beq- 7,.L84
++	cmpwi 7,9,12
++	beq- 7,.L84
++	cmpwi 7,9,13
++	beq- 7,.L89
++	cmpwi 7,9,14
++	beq- 7,.L90
++	cmpwi 7,9,15
++	beq- 7,.L84
++.L49:
++	addi 1,1,2192
++	li 3,0
++	ld 0,16(1)
++	ld 29,-24(1)
++	mtlr 0
++	ld 31,-8(1)
++	blr
++.L84:
++	ld 9,16(31)
++	std 3,0(9)
++	addi 1,1,2192
++	li 3,0
++	ld 0,16(1)
++	ld 29,-24(1)
++	mtlr 0
++	ld 31,-8(1)
++	blr
++.L87:
++	ld 9,16(31)
++	stb 3,0(9)
++	addi 1,1,2192
++	li 3,0
++	ld 0,16(1)
++	ld 29,-24(1)
++	mtlr 0
++	ld 31,-8(1)
++	blr
++.L86:
++	ld 9,16(31)
++	sth 3,0(9)
++	b .L49
++.L85:
++	ld 9,16(31)
++	stw 3,0(9)
++	b .L49
++.L89:
++	ld 9,16(31)
++	stfs 1,0(9)
++	b .L49
++.L90:
++	ld 9,16(31)
++	stfd 1,0(9)
++	b .L49
++	.long 0
++	.byte 0,0,0,1,128,3,0,0
++	.size	.__builtin_avcall,.-.__builtin_avcall
++	.ident	"GCC: (GNU) 3.3.3 (SuSE Linux)"
+Index: ffcall-1.10+cvs20100619/callback/vacall_r/vacall-powerpc64le.c
+===================================================================
+--- /dev/null
++++ ffcall-1.10+cvs20100619/callback/vacall_r/vacall-powerpc64le.c
+@@ -0,0 +1,133 @@
++/* vacall function for powerpc64 CPU */
++
++/*
++ * Copyright 1995-2006 Bruno Haible, <br...@clisp.org>
++ * Copyright 2000 Adam Fedor, <fe...@gnu.org>
++ * Copyright 2004 Paul Guyot, <pgu...@kallisys.net>
++ *
++ * This is free software distributed under the GNU General Public Licence
++ * described in the file COPYING. Contact the author if you don't have this
++ * or can't live with it. There is ABSOLUTELY NO WARRANTY, explicit or implied,
++ * on this software.
++ */
++
++#ifndef REENTRANT
++#include "vacall.h.in"
++#else /* REENTRANT */
++#include "vacall_r.h.in"
++#endif
++
++#ifdef REENTRANT
++#define __vacall __vacall_r
++register struct { void (*vacall_function) (void*,va_alist); void* arg; }
++         *		env	__asm__("r11");
++#endif
++register double		farg1	__asm__("fr1");
++register double		farg2	__asm__("fr2");
++register double		farg3	__asm__("fr3");
++register double		farg4	__asm__("fr4");
++register double		farg5	__asm__("fr5");
++register double		farg6	__asm__("fr6");
++register double		farg7	__asm__("fr7");
++register double		farg8	__asm__("fr8");
++register double		farg9	__asm__("fr9");
++register double		farg10	__asm__("fr10");
++register double		farg11	__asm__("fr11");
++register double		farg12	__asm__("fr12");
++register double		farg13	__asm__("fr13");
++register __vaword	iret	__asm__("r3");
++register float		fret	__asm__("fr1");
++register double		dret	__asm__("fr1");
++
++void /* the return type is variable, not void! */
++__vacall (__vaword word1, __vaword word2, __vaword word3, __vaword word4,
++          __vaword word5, __vaword word6, __vaword word7, __vaword word8,
++          __vaword firstword)
++{
++  __va_alist list;
++  /* When a parameter is passed in a register,
++   * stack space is still allocated for it.
++   */
++  /* Move the arguments passed in registers to their stack locations. */
++  (&firstword)[-8] = word1;
++  (&firstword)[-7] = word2;
++  (&firstword)[-6] = word3;
++  (&firstword)[-5] = word4;
++  (&firstword)[-4] = word5;
++  (&firstword)[-3] = word6;
++  (&firstword)[-2] = word7;
++  (&firstword)[-1] = word8;
++  list.farg[0] = farg1;
++  list.farg[1] = farg2;
++  list.farg[2] = farg3;
++  list.farg[3] = farg4;
++  list.farg[4] = farg5;
++  list.farg[5] = farg6;
++  list.farg[6] = farg7;
++  list.farg[7] = farg8;
++  list.farg[8] = farg9;
++  list.farg[9] = farg10;
++  list.farg[10] = farg11;
++  list.farg[11] = farg12;
++  list.farg[12] = farg13;
++  /* Prepare the va_alist. */
++  list.flags = 0;
++  list.aptr = (long)(&firstword - 8);
++  list.raddr = (void*)0;
++  list.rtype = __VAvoid;
++  list.memfargptr = &list.farg[0];
++  /* Call vacall_function. The macros do all the rest. */
++#ifndef REENTRANT
++  (*vacall_function) (&list);
++#else /* REENTRANT */
++  (*env->vacall_function) (env->arg,&list);
++#endif
++  /* Put return value into proper register. */
++  if (list.rtype == __VAvoid) {
++  } else
++  if (list.rtype == __VAchar) {
++    iret = list.tmp._char;
++  } else
++  if (list.rtype == __VAschar) {
++    iret = list.tmp._schar;
++  } else
++  if (list.rtype == __VAuchar) {
++    iret = list.tmp._uchar;
++  } else
++  if (list.rtype == __VAshort) {
++    iret = list.tmp._short;
++  } else
++  if (list.rtype == __VAushort) {
++    iret = list.tmp._ushort;
++  } else
++  if (list.rtype == __VAint) {
++    iret = list.tmp._int;
++  } else
++  if (list.rtype == __VAuint) {
++    iret = list.tmp._uint;
++  } else
++  if (list.rtype == __VAlong) {
++    iret = list.tmp._long;
++  } else
++  if (list.rtype == __VAulong) {
++    iret = list.tmp._ulong;
++  } else
++  if (list.rtype == __VAlonglong) {
++    iret = list.tmp._long;
++  } else
++  if (list.rtype == __VAulonglong) {
++    iret = list.tmp._ulong;
++  } else
++  if (list.rtype == __VAfloat) {
++    fret = list.tmp._float;
++  } else
++  if (list.rtype == __VAdouble) {
++    dret = list.tmp._double;
++  } else
++  if (list.rtype == __VAvoidp) {
++    iret = (long)list.tmp._ptr;
++  } else
++  if (list.rtype == __VAstruct) {
++    /* __VA_PCC_STRUCT_RETURN and __VA_REGISTER_STRUCT_RETURN are ignored */
++  }
++}
+Index: ffcall-1.10+cvs20100619/callback/vacall_r/vacall-powerpc64le.s
+===================================================================
+--- /dev/null
++++ ffcall-1.10+cvs20100619/callback/vacall_r/vacall-powerpc64le.s
+@@ -0,0 +1,135 @@
++	.file	"vacall-powerpc64.c"
++	.section	".toc","aw"
++	.section	".text"
++	.align 2
++	.p2align 4,,15
++	.globl __vacall_r
++__vacall_r:
++	addis	%r2, %r12, .TOC.-__vacall_r@ha
++	addi	%r2, %r2, .TOC.-__vacall_r@l
++	.localentry	__vacall_r, .-__vacall_r
++.L.__vacall_r:
++	mflr 0
++	std 29,-24(1)
++	li 29,0
++	std 0,16(1)
++	stdu 1,-400(1)
++	li 0,0
++	stw 29,136(1)
++	stw 0,112(1)
++	addi 0,1,448
++	std 9,496(1)
++	std 3,448(1)
++	std 4,456(1)
++	std 29,128(1)
++	std 5,464(1)
++	std 6,472(1)
++	std 7,480(1)
++	std 8,488(1)
++	std 10,504(1)
++	std 0,120(1)
++	stfd 1,176(1)
++	stfd 2,184(1)
++	addi 9,1,176
++	stfd 3,192(1)
++	stfd 4,200(1)
++	stfd 5,208(1)
++	stfd 6,216(1)
++	addi 4,1,112
++	std 9,168(1)
++	stfd 7,224(1)
++	stfd 8,232(1)
++	stfd 9,240(1)
++	stfd 10,248(1)
++	stfd 11,256(1)
++	stfd 12,264(1)
++	stfd 13,272(1)
++	ld 9,0(11)
++	ld 3,8(11)
++	ld 0,0(9)
++	std 2,40(1)
++	mtctr 0
++	ld 11,16(9)
++	ld 2,8(9)
++	bctrl
++	ld 2,40(1)
++	lwz 0,136(1)
++	cmpdi 7,0,0
++	beq 7,.L31
++	cmpwi 7,0,1
++	beq 7,.L32
++	cmpwi 7,0,2
++	beq 7,.L35
++	cmpwi 7,0,3
++	beq 7,.L32
++	cmpwi 7,0,4
++	beq 7,.L36
++	cmpwi 7,0,5
++	beq 7,.L37
++	cmpwi 7,0,6
++	beq 7,.L38
++	cmpwi 7,0,7
++	beq 7,.L39
++	cmpwi 7,0,8
++	beq 7,.L33
++	cmpwi 7,0,9
++	beq 7,.L33
++	cmpwi 7,0,10
++	beq 7,.L33
++	cmpwi 7,0,11
++	beq 7,.L33
++	cmpwi 7,0,12
++	beq 7,.L40
++	cmpwi 7,0,13
++	beq 7,.L41
++	cmpwi 7,0,14
++	beq 7,.L33
++	.p2align 4,,15
++.L31:
++	addi 1,1,400
++	ld 0,16(1)
++	ld 29,-24(1)
++	mtlr 0
++	blr
++	.p2align 4,,15
++.L32:
++	lbz 3,152(1)
++	addi 1,1,400
++	ld 0,16(1)
++	ld 29,-24(1)
++	mtlr 0
++	blr
++.L36:
++	lha 3,152(1)
++	b .L31
++	.p2align 4,,15
++.L35:
++	lbz 0,152(1)
++	addi 1,1,400
++	ld 29,-24(1)
++	extsb 3,0
++	ld 0,16(1)
++	mtlr 0
++	blr
++.L33:
++	ld 3,152(1)
++	b .L31
++.L37:
++	lhz 3,152(1)
++	b .L31
++.L38:
++	lwa 3,152(1)
++	b .L31
++.L39:
++	lwz 3,152(1)
++	b .L31
++.L40:
++	lfs 1,152(1)
++	b .L31
++.L41:
++	lfd 1,152(1)
++	b .L31
++	.long 0
++	.byte 0,0,0,1,128,3,0,0
++	.size	__vacall_r,.-.L.__vacall_r
++	.ident	"GCC: (GNU) 4.0.2"
+Index: ffcall-1.10+cvs20100619/vacall/vacall-powerpc64le.c
+===================================================================
+--- /dev/null
++++ ffcall-1.10+cvs20100619/vacall/vacall-powerpc64le.c
+@@ -0,0 +1,133 @@
++/* vacall function for powerpc64 CPU */
++
++/*
++ * Copyright 1995-2006 Bruno Haible, <br...@clisp.org>
++ * Copyright 2000 Adam Fedor, <fe...@gnu.org>
++ * Copyright 2004 Paul Guyot, <pgu...@kallisys.net>
++ *
++ * This is free software distributed under the GNU General Public Licence
++ * described in the file COPYING. Contact the author if you don't have this
++ * or can't live with it. There is ABSOLUTELY NO WARRANTY, explicit or implied,
++ * on this software.
++ */
++
++#ifndef REENTRANT
++#include "vacall.h.in"
++#else /* REENTRANT */
++#include "vacall_r.h.in"
++#endif
++
++#ifdef REENTRANT
++#define __vacall __vacall_r
++register struct { void (*vacall_function) (void*,va_alist); void* arg; }
++         *		env	__asm__("r11");
++#endif
++register double		farg1	__asm__("fr1");
++register double		farg2	__asm__("fr2");
++register double		farg3	__asm__("fr3");
++register double		farg4	__asm__("fr4");
++register double		farg5	__asm__("fr5");
++register double		farg6	__asm__("fr6");
++register double		farg7	__asm__("fr7");
++register double		farg8	__asm__("fr8");
++register double		farg9	__asm__("fr9");
++register double		farg10	__asm__("fr10");
++register double		farg11	__asm__("fr11");
++register double		farg12	__asm__("fr12");
++register double		farg13	__asm__("fr13");
++register __vaword	iret	__asm__("r3");
++register float		fret	__asm__("fr1");
++register double		dret	__asm__("fr1");
++
++void /* the return type is variable, not void! */
++__vacall (__vaword word1, __vaword word2, __vaword word3, __vaword word4,
++          __vaword word5, __vaword word6, __vaword word7, __vaword word8,
++          __vaword firstword)
++{
++  __va_alist list;
++  /* When a parameter is passed in a register,
++   * stack space is still allocated for it.
++   */
++  /* Move the arguments passed in registers to their stack locations. */
++  (&firstword)[-8] = word1;
++  (&firstword)[-7] = word2;
++  (&firstword)[-6] = word3;
++  (&firstword)[-5] = word4;
++  (&firstword)[-4] = word5;
++  (&firstword)[-3] = word6;
++  (&firstword)[-2] = word7;
++  (&firstword)[-1] = word8;
++  list.farg[0] = farg1;
++  list.farg[1] = farg2;
++  list.farg[2] = farg3;
++  list.farg[3] = farg4;
++  list.farg[4] = farg5;
++  list.farg[5] = farg6;
++  list.farg[6] = farg7;
++  list.farg[7] = farg8;
++  list.farg[8] = farg9;
++  list.farg[9] = farg10;
++  list.farg[10] = farg11;
++  list.farg[11] = farg12;
++  list.farg[12] = farg13;
++  /* Prepare the va_alist. */
++  list.flags = 0;
++  list.aptr = (long)(&firstword - 8);
++  list.raddr = (void*)0;
++  list.rtype = __VAvoid;
++  list.memfargptr = &list.farg[0];
++  /* Call vacall_function. The macros do all the rest. */
++#ifndef REENTRANT
++  (*vacall_function) (&list);
++#else /* REENTRANT */
++  (*env->vacall_function) (env->arg,&list);
++#endif
++  /* Put return value into proper register. */
++  if (list.rtype == __VAvoid) {
++  } else
++  if (list.rtype == __VAchar) {
++    iret = list.tmp._char;
++  } else
++  if (list.rtype == __VAschar) {
++    iret = list.tmp._schar;
++  } else
++  if (list.rtype == __VAuchar) {
++    iret = list.tmp._uchar;
++  } else
++  if (list.rtype == __VAshort) {
++    iret = list.tmp._short;
++  } else
++  if (list.rtype == __VAushort) {
++    iret = list.tmp._ushort;
++  } else
++  if (list.rtype == __VAint) {
++    iret = list.tmp._int;
++  } else
++  if (list.rtype == __VAuint) {
++    iret = list.tmp._uint;
++  } else
++  if (list.rtype == __VAlong) {
++    iret = list.tmp._long;
++  } else
++  if (list.rtype == __VAulong) {
++    iret = list.tmp._ulong;
++  } else
++  if (list.rtype == __VAlonglong) {
++    iret = list.tmp._long;
++  } else
++  if (list.rtype == __VAulonglong) {
++    iret = list.tmp._ulong;
++  } else
++  if (list.rtype == __VAfloat) {
++    fret = list.tmp._float;
++  } else
++  if (list.rtype == __VAdouble) {
++    dret = list.tmp._double;
++  } else
++  if (list.rtype == __VAvoidp) {
++    iret = (long)list.tmp._ptr;
++  } else
++  if (list.rtype == __VAstruct) {
++    /* __VA_PCC_STRUCT_RETURN and __VA_REGISTER_STRUCT_RETURN are ignored */
++  }
++}
+Index: ffcall-1.10+cvs20100619/vacall/vacall-powerpc64le.s
+===================================================================
+--- /dev/null
++++ ffcall-1.10+cvs20100619/vacall/vacall-powerpc64le.s
+@@ -0,0 +1,141 @@
++	.file	"vacall-powerpc64.c"
++	.section	".toc","aw"
++	.section	".text"
++	.section	".toc","aw"
++.LC0:
++	.tc vacall_function[TC],vacall_function
++	.section	".text"
++	.align 2
++	.p2align 4,,15
++	.globl __vacall
++__vacall:
++	addis	%r2, %r12, .TOC.-__vacall@ha
++	addi	%r2, %r2, .TOC.-__vacall@l
++	.localentry __vacall, .-__vacall
++.L.__vacall:
++	mflr 0
++	std 29,-24(1)
++	li 29,0
++	ld 11,.LC0@toc(2)
++	std 0,16(1)
++	stdu 1,-400(1)
++	li 0,0
++	stw 29,136(1)
++	stw 0,112(1)
++	addi 0,1,448
++	std 9,496(1)
++	std 3,448(1)
++	std 4,456(1)
++	std 5,464(1)
++	std 6,472(1)
++	std 7,480(1)
++	std 8,488(1)
++	std 10,504(1)
++	std 0,120(1)
++	stfd 1,176(1)
++	addi 9,1,176
++	stfd 2,184(1)
++	stfd 3,192(1)
++	stfd 4,200(1)
++	stfd 5,208(1)
++	addi 3,1,112
++	std 9,168(1)
++	stfd 6,216(1)
++	stfd 7,224(1)
++	stfd 8,232(1)
++	stfd 9,240(1)
++	stfd 10,248(1)
++	stfd 11,256(1)
++	stfd 12,264(1)
++	stfd 13,272(1)
++	std 29,128(1)
++	ld 9,0(11)
++	nop
++	nop
++	ld 0,0(9)
++	std 2,40(1)
++	mtctr 0
++	ld 11,16(9)
++	ld 2,8(9)
++	bctrl
++	ld 2,40(1)
++	lwz 0,136(1)
++	cmpdi 7,0,0
++	beq 7,.L31
++	cmpwi 7,0,1
++	beq 7,.L32
++	cmpwi 7,0,2
++	beq 7,.L35
++	cmpwi 7,0,3
++	beq 7,.L32
++	cmpwi 7,0,4
++	beq 7,.L36
++	cmpwi 7,0,5
++	beq 7,.L37
++	cmpwi 7,0,6
++	beq 7,.L38
++	cmpwi 7,0,7
++	beq 7,.L39
++	cmpwi 7,0,8
++	beq 7,.L33
++	cmpwi 7,0,9
++	beq 7,.L33
++	cmpwi 7,0,10
++	beq 7,.L33
++	cmpwi 7,0,11
++	beq 7,.L33
++	cmpwi 7,0,12
++	beq 7,.L40
++	cmpwi 7,0,13
++	beq 7,.L41
++	cmpwi 7,0,14
++	beq 7,.L33
++	.p2align 4,,15
++.L31:
++	addi 1,1,400
++	ld 0,16(1)
++	ld 29,-24(1)
++	mtlr 0
++	blr
++	.p2align 4,,15
++.L32:
++	lbz 3,152(1)
++	addi 1,1,400
++	ld 0,16(1)
++	ld 29,-24(1)
++	mtlr 0
++	blr
++.L36:
++	lha 3,152(1)
++	b .L31
++	.p2align 4,,15
++.L35:
++	lbz 0,152(1)
++	addi 1,1,400
++	ld 29,-24(1)
++	extsb 3,0
++	ld 0,16(1)
++	mtlr 0
++	blr
++.L33:
++	ld 3,152(1)
++	b .L31
++.L37:
++	lhz 3,152(1)
++	b .L31
++.L38:
++	lwa 3,152(1)
++	b .L31
++.L39:
++	lwz 3,152(1)
++	b .L31
++.L40:
++	lfs 1,152(1)
++	b .L31
++.L41:
++	lfd 1,152(1)
++	b .L31
++	.long 0
++	.byte 0,0,0,1,128,3,0,0
++	.size	__vacall,.-.L.__vacall
++	.ident	"GCC: (GNU) 4.0.2"
+Index: ffcall-1.10+cvs20100619/avcall/avcall.h.in
+===================================================================
+--- ffcall-1.10+cvs20100619.orig/avcall/avcall.h.in
++++ ffcall-1.10+cvs20100619/avcall/avcall.h.in
+@@ -1144,7 +1144,7 @@ typedef struct
+  * different alignment.
+  */
+ /* little endian -> small structures < 1 word are adjusted to the left */
+-#if defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__ARMEL__) || (defined(__ia64__) && defined(__GNUC__) && (__GNUC__ >= 3))
++#if (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)) || defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__ARMEL__) || (defined(__ia64__) && defined(__GNUC__) && (__GNUC__ >= 3))
+ #define __av_struct(LIST,TYPE,TYPE_SIZE,TYPE_ALIGN,ASSIGN,VAL)		\
+   (((LIST).aptr =							\
+     (__avword*)(((__avword)(LIST).aptr+(TYPE_SIZE)+__av_struct_alignment(TYPE_ALIGN)-1) & -(long)__av_struct_alignment(TYPE_ALIGN)))\
+Index: ffcall-1.10+cvs20100619/callback/vacall_r/vacall_r.h.in
+===================================================================
+--- ffcall-1.10+cvs20100619.orig/callback/vacall_r/vacall_r.h.in
++++ ffcall-1.10+cvs20100619/callback/vacall_r/vacall_r.h.in
+@@ -714,12 +714,12 @@ typedef __va_alist* va_alist;
+    (LIST)->aptr + ((-(TYPE_SIZE)) & 3)					\
+   )
+ #endif
+-#if defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__ARMEL__) || ((defined(__mipsn32__) || defined(__mips64__)) && defined(_MIPSEL))
++#if defined(__i386__) || (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)) || defined(__alpha__) || defined(__ia64__) || defined(__ARMEL__) || ((defined(__mipsn32__) || defined(__mips64__)) && defined(_MIPSEL))
+ /* little endian -> small args < 1 word are adjusted to the left */
+ #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
+   __va_arg_leftadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)
+ #endif
+-#if defined(__m68k__) || ((defined(__mipsn32__) || defined(__mips64__)) && defined(_MIPSEB)) || defined(__sparc__) || defined(__sparc64__) || defined(__hppa__) || (defined(__arm__) && !defined(__ARMEL__)) || defined(__powerpc__) || defined(__powerpc64__) || defined(__m88k__) || defined(__convex__) || defined(__s390__)
++#if defined(__m68k__) || ((defined(__mipsn32__) || defined(__mips64__)) && defined(_MIPSEB)) || defined(__sparc__) || defined(__sparc64__) || defined(__hppa__) || (defined(__arm__) && !defined(__ARMEL__)) || ((defined(__powerpc__) || defined(__powerpc64__)) && !defined(__LITTLE_ENDIAN__)) || defined(__m88k__) || defined(__convex__) || defined(__s390__)
+ /* big endian -> small args < 1 word are adjusted to the right */
+ #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
+   __va_arg_rightadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)
+Index: ffcall-1.10+cvs20100619/vacall/vacall.h.in
+===================================================================
+--- ffcall-1.10+cvs20100619.orig/vacall/vacall.h.in
++++ ffcall-1.10+cvs20100619/vacall/vacall.h.in
+@@ -715,12 +715,12 @@ typedef __va_alist* va_alist;
+    (LIST)->aptr + ((-(TYPE_SIZE)) & 3)					\
+   )
+ #endif
+-#if defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__ARMEL__)
++#if defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__ARMEL__) || (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__))
+ /* little endian -> small args < 1 word are adjusted to the left */
+ #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
+   __va_arg_leftadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)
+ #endif
+-#if defined(__m68k__) || defined(__mipsn32__) || defined(__mips64__) || defined(__sparc__) || defined(__sparc64__) || defined(__hppa__) || (defined(__arm__) && !defined(__ARMEL__)) || defined(__powerpc__) || defined(__powerpc64__) || defined(__m88k__) || defined(__convex__) || defined(__s390__)
++#if defined(__m68k__) || defined(__mipsn32__) || defined(__mips64__) || defined(__sparc__) || defined(__sparc64__) || defined(__hppa__) || (defined(__arm__) && !defined(__ARMEL__)) || ((defined(__powerpc__) || defined(__powerpc64__)) && !defined(__LITTLE_ENDIAN__)) || defined(__m88k__) || defined(__convex__) || defined(__s390__)
+ /* big endian -> small args < 1 word are adjusted to the right */
+ #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
+   __va_arg_rightadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)
+Index: ffcall-1.10+cvs20100619/avcall/Makefile.in
+===================================================================
+--- ffcall-1.10+cvs20100619.orig/avcall/Makefile.in
++++ ffcall-1.10+cvs20100619/avcall/Makefile.in
+@@ -142,6 +142,9 @@ avcall-powerpc.lo : $(srcdir)/avcall-pow
+ avcall-powerpc64.lo : $(srcdir)/avcall-powerpc64.s
+ 	$(LIBTOOL_COMPILE) $(CC) @GCC_X_NONE@ -c $(srcdir)/avcall-powerpc64.s
+ 
++avcall-powerpc64le.lo : $(srcdir)/avcall-powerpc64le.s
++	$(LIBTOOL_COMPILE) $(CC) @GCC_X_NONE@ -c $(srcdir)/avcall-powerpc64le.s
++
+ avcall-m88k.lo : $(srcdir)/avcall-m88k.s
+ 	$(LIBTOOL_COMPILE) $(CC) @GCC_X_NONE@ -c $(srcdir)/avcall-m88k.s
+ 
+Index: ffcall-1.10+cvs20100619/callback/vacall_r/Makefile.in
+===================================================================
+--- ffcall-1.10+cvs20100619.orig/callback/vacall_r/Makefile.in
++++ ffcall-1.10+cvs20100619/callback/vacall_r/Makefile.in
+@@ -144,6 +144,9 @@ vacall-powerpc.lo : $(srcdir)/vacall-pow
+ vacall-powerpc64.lo : $(srcdir)/vacall-powerpc64.s
+ 	$(LIBTOOL_COMPILE) $(CC) @GCC_X_NONE@ -c $(srcdir)/vacall-powerpc64.s
+ 
++vacall-powerpc64le.lo : $(srcdir)/vacall-powerpc64le.s
++	$(LIBTOOL_COMPILE) $(CC) @GCC_X_NONE@ -c $(srcdir)/vacall-powerpc64le.s
++
+ vacall-m88k.lo : $(srcdir)/vacall-m88k.s
+ 	$(LIBTOOL_COMPILE) $(CC) @GCC_X_NONE@ -c $(srcdir)/vacall-m88k.s
+ 
+Index: ffcall-1.10+cvs20100619/vacall/Makefile.in
+===================================================================
+--- ffcall-1.10+cvs20100619.orig/vacall/Makefile.in
++++ ffcall-1.10+cvs20100619/vacall/Makefile.in
+@@ -126,6 +126,9 @@ vacall-powerpc.o : $(srcdir)/vacall-powe
+ vacall-powerpc64.o : $(srcdir)/vacall-powerpc64.s
+ 	$(CC) @GCC_X_NONE@ -c $(srcdir)/vacall-powerpc64.s
+ 
++vacall-powerpc64le.o : $(srcdir)/vacall-powerpc64le.s
++	$(CC) @GCC_X_NONE@ -c $(srcdir)/vacall-powerpc64le.s
++
+ vacall-m88k.o : $(srcdir)/vacall-m88k.s
+ 	$(CC) @GCC_X_NONE@ -c $(srcdir)/vacall-m88k.s
+ 
diff -Nru ffcall-1.10+cvs20100619/debian/patches/series ffcall-1.10+cvs20100619/debian/patches/series
--- ffcall-1.10+cvs20100619/debian/patches/series	2013-12-24 11:50:11.000000000 +0000
+++ ffcall-1.10+cvs20100619/debian/patches/series	2014-11-06 01:57:44.000000000 +0000
@@ -1,2 +1,3 @@
 fix-powerpcspe.patch
 0001-fix-callback-on-x86_64.patch
+ppc64el-elfv2.patch
