Can you explain what the deal is with the IV?

As I understand it: ...

I see. The new template simply returns %eax... And it works correctly with push/pop %ebx.

And now the return value should be treated correctly. As far as I understand, it can point at the last block in the chunk, and therefore we need to copy it back to cdata->iv, as it might reside in the realigned buffer and get zapped on the next loop spin. Right?
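
In other words, the handling would look roughly like this (just a sketch of
the CBC branch, mirroring the loop further down, with the alignment copies
left out):

	iv = cdata->iv;
	memcpy (iv, ctx->iv, AES_BLOCK_SIZE);	/* load IV from the EVP context */
	do {
		/* carry the IV over from the previous spin *before* the
		   realigned buffer is reused and the old block gets zapped */
		if (iv != cdata->iv)
			memcpy (cdata->iv, iv, AES_BLOCK_SIZE);

		iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE, cdata, out, inp);
	} while (nbytes -= chunk);
	memcpy (ctx->iv, iv, AES_BLOCK_SIZE);	/* final IV back to the context */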


I've rearranged the code and the order of arguments to facilitate porting to other platforms. It's all commented...

Can you verify this code? A.
/* 
 * Support for VIA PadLock Advanced Cryptography Engine (ACE)
 * Written by Michal Ludvig <[EMAIL PROTECTED]>
 *            http://www.logix.cz/michal
 *
 * Date: May 13, 2004
 */

/* ====================================================================
 * Copyright (c) 1999-2001 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    [EMAIL PROTECTED]
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * ([EMAIL PROTECTED]).  This product includes software written by Tim
 * Hudson ([EMAIL PROTECTED]).
 *
 */


#include <stdio.h>
#include <string.h>

#include <openssl/crypto.h>
#include <openssl/dso.h>
#include <openssl/engine.h>
#include <openssl/evp.h>
#include <openssl/aes.h>

#ifndef OPENSSL_NO_HW
#ifndef OPENSSL_NO_HW_PADLOCK

/* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
#if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
#  ifndef OPENSSL_NO_DYNAMIC_ENGINE
#    define DYNAMIC_ENGINE
#  endif
#elif (OPENSSL_VERSION_NUMBER >= 0x00907000L)
#  ifdef ENGINE_DYNAMIC_SUPPORT
#    define DYNAMIC_ENGINE
#  endif
#else
#  error "Only OpenSSL >= 0.9.7 is supported"
#endif

/* VIA PadLock AES is available *ONLY* on some x86 CPUs.
   Not only is it absent elsewhere, it can't even be
   compiled on other platforms!

   In addition, because of the heavy use of inline assembler,
   you must use GNU GCC for now. This is not a "technology
   limitation"; I simply didn't have a chance to test other
   compilers and chose the safe way instead :-) */
#if !defined(I386_ONLY) && (defined(__i386__) || defined(__i386) || defined(_M_IX86))
#define COMPILE_HW_PADLOCK
#else
#undef COMPILE_HW_PADLOCK
#endif

static ENGINE *ENGINE_padlock (void);

void ENGINE_load_padlock (void)
{
/* On non-x86 CPUs it just returns. */
#ifdef COMPILE_HW_PADLOCK
	ENGINE *toadd = ENGINE_padlock ();
	if (!toadd) return;
	ENGINE_add (toadd);
	ENGINE_free (toadd);
	ERR_clear_error ();
#endif
}

#ifdef COMPILE_HW_PADLOCK
/* Function for ENGINE detection and control */
static int padlock_available(void);
static int padlock_init(ENGINE *e);

/* RNG Stuff */
static RAND_METHOD padlock_rand;

/* Cipher Stuff */
static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);

/* Engine names */
static const char *padlock_id = "padlock";
static char padlock_name[100];

/* Available features */
static int padlock_use_ace = 0;	/* Advanced Cryptography Engine */
static int padlock_use_rng = 0;	/* Random Number Generator */

/* ===== Engine "management" functions ===== */

/* Prepare the ENGINE structure for registration */
static int
padlock_bind_helper(ENGINE *e)
{
	/* Check available features */
	padlock_available();

	/* Generate a nice engine name with available features */
	snprintf(padlock_name, sizeof(padlock_name), "VIA PadLock (%s, %s)", 
		 padlock_use_rng ? "RNG" : "no-RNG",
		 padlock_use_ace ? "ACE" : "no-ACE");

	/* Register everything or return with an error */ 
	if (!ENGINE_set_id(e, padlock_id) ||
	    !ENGINE_set_name(e, padlock_name) ||

	    !ENGINE_set_init_function(e, padlock_init) ||

	    (padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) ||
	    (padlock_use_rng && !ENGINE_set_RAND (e, &padlock_rand))) {
		return 0;
	}

	/* Everything looks good */
	return 1;
}

/* Constructor */
static ENGINE *
ENGINE_padlock(void)
{
	ENGINE *eng = ENGINE_new();

	if (!eng) {
		return NULL;
	}

	if (!padlock_bind_helper(eng)) {
		ENGINE_free(eng);
		return NULL;
	}

	return eng;
}

/* Some AES-related constants */
#define AES_BLOCK_SIZE		16
#define AES_KEY_SIZE_128	16
#define AES_KEY_SIZE_192	24
#define AES_KEY_SIZE_256	32

#define AES_KEY_WORDS		(4 * (AES_MAXNR + 1))
#define	AES_KEY_BYTES		(AES_KEY_WORDS * 4)

/* Here we store the plain key for AES128
   and the extended key for AES192/AES256 */
struct padlock_cipher_data
{	unsigned char iv[AES_BLOCK_SIZE];	/* Initialization vector */
	union {	unsigned int cword[4];
		struct {
			int rounds:4;
			int algo:3;
			int keygen:1;
			int interm:1;
			int encdec:1;
			int ksize:2;
		} b;
	} cword;		/* Control word */
	AES_KEY ks;		/* Encryption key */
};
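/* Note: once this structure is 16-byte aligned, its layout is exactly what
   the xcrypt templates below hand to the hardware: IV at offset 0, control
   word at offset 16 and the key schedule at offset 32. */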

/*
 * =======================================================
 * Inline assembler section(s).
 * =======================================================
 * Order of arguments is chosen to facilitate Windows port
 * using __fastcall calling convention. If you wish to add
 * more routines, keep in mind that in __fastcall first
 * argument is passed in %ecx and second - in %edx.
 * =======================================================
 */
#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_INLINE_ASM)
/*
 * As for the excessive "push %ebx"/"pop %ebx" found all over:
 * when generating position-independent code GCC won't let us
 * use "b" in assembler templates, nor even respect "ebx" in
 * the clobber list. Hence the trouble...
 */
/* Helper function - check if a CPUID instruction
   is available on this CPU */
static int
padlock_insn_cpuid_available(void)
{
	int result = -1;

	/* TODO: handle the "red-zone" once this 
		 module is enabled on AMD64 */
	/* We're checking if the bit #21 of EFLAGS 
	   can be toggled. If yes = CPUID is available. */
	asm volatile (
		"pushf\n"
		"popl %%eax\n"
		"xorl $0x200000, %%eax\n"
		"movl %%eax, %%ecx\n"
		"andl $0x200000, %%ecx\n"
		"pushl %%eax\n"
		"popf\n"
		"pushf\n"
		"popl %%eax\n"
		"andl $0x200000, %%eax\n"
		"xorl %%eax, %%ecx\n"
		"movl %%ecx, %0\n"
		: "=r" (result) : : "eax", "ecx");
	
	return (result == 0);
}

static int
padlock_available(void)
{
	char vendor_string[16];
	unsigned int eax, edx;

	/* First check if the CPUID instruction is available at all... */
	if (! padlock_insn_cpuid_available())
		return 0;

	/* Are we running on the Centaur (VIA) CPU? */
	eax = 0x00000000;
	vendor_string[12] = 0;
	asm volatile (
		"pushl	%%ebx\n"
		"cpuid\n"
		"movl	%%ebx,(%%edi)\n"
		"movl	%%edx,4(%%edi)\n"
		"movl	%%ecx,8(%%edi)\n"
		"popl	%%ebx"
		: "+a"(eax) : "D"(vendor_string) : "ebx", "ecx", "edx");
	if (strcmp(vendor_string, "CentaurHauls") != 0)
		return 0;

	/* Check for Centaur Extended Feature Flags presence */
	eax = 0xC0000000;
	asm volatile (
		"pushl	%%ebx\n"
		"cpuid\n"
		"popl	%%ebx"
		: "+a"(eax) : : "ebx", "ecx", "edx");
	if (eax < 0xC0000001)
		return 0;

	/* Read the Centaur Extended Feature Flags */
	eax = 0xC0000001;
	asm volatile (
		"pushl	%%ebx\n"
		"cpuid\n"
		"popl	%%ebx"
		: "+a"(eax), "=d"(edx) : : "ebx", "ecx");

	/* Fill up some flags */
	padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
	padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));

	return padlock_use_ace + padlock_use_rng;
}
/* Our own htonl()/ntohl() */
static inline void
padlock_bswapl (unsigned long *key)
{	int i;

	for (i = 0; i < AES_KEY_WORDS; i++, key++)
		asm volatile ("bswapl %0" : "+r"(*key));
}

static inline void
padlock_reload_key (void)
{	asm volatile ("pushfl; popfl");			}
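/* The pushfl/popfl above is, as far as I can tell, what marks the PadLock
   unit's cached key schedule as stale, so the next xcrypt reloads the key
   material from memory; padlock_verify_context below relies on the same
   mechanism. */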

/*
 * This is heuristic key-context tracing. At first one might
 * think that atomic swap instructions are required, but they
 * are not actually necessary. The point is that if saved_cdata
 * was changed by another thread after we've read it and before
 * we compare it with cdata, our key *shall* be reloaded upon
 * the thread context switch anyway, and we are therefore set
 * in either case...
 */
static inline void
padlock_verify_context (struct padlock_cipher_data *cdata)
{	static struct padlock_cipher_data *saved_cdata;

	asm volatile (
	"pushfl\n"
"	bt	$30,(%%esp)\n"
"	jnc	1f\n"
"	cmp	%2,%1\n"
"	je	1f\n"
"	mov	%2,%0\n"
"	popfl\n"
"	sub	$4,%%esp\n"
"1:	add	$4,%%esp\n"
        :"+m"(saved_cdata):"r"(saved_cdata),"r"(cdata):"cc");
}

/* Template for padlock_xcrypt_* modes */
#define PADLOCK_XCRYPT_ASM(name,opcode)		\
static inline void *name (unsigned int cnt,	\
	struct padlock_cipher_data *cdata,	\
	void *out, const void *inp) 		\
{	asm volatile (	"pushl	%%ebx\n"	\
			"leal	16(%0),%%edx\n"	\
			"leal	32(%0),%%ebx\n"	\
			opcode "\n"		\
			"popl	%%ebx"		\
			: "+a"(cdata),		\
			  "+D"(out),"+S"(inp),"+c"(cnt)\
			: : "edx","cc");	\
	return cdata;	/* well, IV actually */	\
}
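/* Note on the template: %eax enters holding cdata, from which the two leal
   instructions derive the control-word pointer (cdata+16, in %edx) and the
   key pointer (cdata+32, in %ebx). On exit it holds whatever the xcrypt
   instruction left in it, which -- per the discussion at the top of this
   message -- points at the last block processed, i.e. the IV to carry over.
   Hence the return value. */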

/* Generate all functions with appropriate opcodes */
PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8");	/* rep xcryptecb */
PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0");	/* rep xcryptcbc */
PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0");	/* rep xcryptcfb */
PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8");	/* rep xcryptofb */

/* The RNG call itself */
static inline unsigned int
padlock_xstore(size_t count,unsigned int edx_in,
		unsigned char **output_addr)
{
	unsigned int eax_out;

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc0"	/* rep xstore */
	    : "+D"(*output_addr), "=a"(eax_out)	/* output */
	    : "d" (edx_in), "c"(count)	 	/* input */
	    );

	return eax_out;
}

#endif /* __GNUC__ */

/* Check availability of the engine */
static int
padlock_init(ENGINE *e)
{
	return (padlock_use_rng || padlock_use_ace);
}

/* This stuff is needed if this ENGINE is being compiled into a self-contained
 * shared library.
 */
#ifdef DYNAMIC_ENGINE
static int
padlock_bind_fn(ENGINE *e, const char *id)
{
	if (id && (strcmp(id, padlock_id) != 0)) {
		return 0;
	}

	if (!padlock_bind_helper(e))  {
		return 0;
	}

	return 1;
}

IMPLEMENT_DYNAMIC_CHECK_FN ();
IMPLEMENT_DYNAMIC_BIND_FN (padlock_bind_fn);
#endif /* DYNAMIC_ENGINE */

/* ===== Here comes the "real" engine ===== */

#if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
#define NID_aes_128_cfb	NID_aes_128_cfb128
#endif

#if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
#define NID_aes_128_ofb	NID_aes_128_ofb128
#endif

#if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
#define NID_aes_192_cfb	NID_aes_192_cfb128
#endif

#if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
#define NID_aes_192_ofb	NID_aes_192_ofb128
#endif

#if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
#define NID_aes_256_cfb	NID_aes_256_cfb128
#endif

#if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
#define NID_aes_256_ofb	NID_aes_256_ofb128
#endif

/* List of supported ciphers. */
static int padlock_cipher_nids[] = {
	NID_aes_128_ecb,
	NID_aes_128_cbc,
	NID_aes_128_cfb,
	NID_aes_128_ofb,

	NID_aes_192_ecb,
	NID_aes_192_cbc,
/*	NID_aes_192_cfb, */	/* FIXME: AES192/256 CFB/OFB don't work. */
/*	NID_aes_192_ofb, */

	NID_aes_256_ecb,
	NID_aes_256_cbc,
/*	NID_aes_256_cfb, */
/*	NID_aes_256_ofb, */
};
static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/
				      sizeof(padlock_cipher_nids[0]));

/* Function prototypes ... */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
				const unsigned char *iv, int enc);
static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
			      const unsigned char *in, size_t nbytes);

#define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)(ctx->cipher_data + ((0x10 - ((size_t)(ctx->cipher_data) & 0x0F)) & 0x0F)))
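/* ctx->cipher_data is not guaranteed to be 16-byte aligned, so round it up
   to the next 16-byte boundary; the extra 16 bytes requested as ctx_size in
   DECLARE_AES_EVP below leave room for this adjustment. */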

/* Declaring so many ciphers by hand would be a pain.
   Instead introduce a bit of preprocessor magic :-) */
#define	DECLARE_AES_EVP(ksize,lmode,umode)	\
static const EVP_CIPHER padlock_aes_##ksize##_##lmode = {	\
	NID_aes_##ksize##_##lmode,		\
	AES_BLOCK_SIZE,			\
	AES_KEY_SIZE_##ksize,		\
	AES_BLOCK_SIZE,			\
	0 | EVP_CIPH_##umode##_MODE,	\
	padlock_aes_init_key,		\
	padlock_aes_cipher,		\
	NULL,				\
	sizeof(struct padlock_cipher_data) + 16,	\
	EVP_CIPHER_set_asn1_iv,		\
	EVP_CIPHER_get_asn1_iv,		\
	NULL,				\
	NULL				\
}

DECLARE_AES_EVP(128,ecb,ECB);
DECLARE_AES_EVP(128,cbc,CBC);
DECLARE_AES_EVP(128,cfb,CFB);
DECLARE_AES_EVP(128,ofb,OFB);

DECLARE_AES_EVP(192,ecb,ECB);
DECLARE_AES_EVP(192,cbc,CBC);
DECLARE_AES_EVP(192,cfb,CFB);
DECLARE_AES_EVP(192,ofb,OFB);

DECLARE_AES_EVP(256,ecb,ECB);
DECLARE_AES_EVP(256,cbc,CBC);
DECLARE_AES_EVP(256,cfb,CFB);
DECLARE_AES_EVP(256,ofb,OFB);

static int
padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
{
	/* No specific cipher => return a list of supported nids ... */
	if (!cipher) {
		*nids = padlock_cipher_nids;
		return padlock_cipher_nids_num;
	}

	/* ... or the requested "cipher" otherwise */
	switch (nid) {
	  case NID_aes_128_ecb:
	    *cipher = &padlock_aes_128_ecb;
	    break;
	  case NID_aes_128_cbc:
	    *cipher = &padlock_aes_128_cbc;
	    break;
	  case NID_aes_128_cfb:
	    *cipher = &padlock_aes_128_cfb;
	    break;
	  case NID_aes_128_ofb:
	    *cipher = &padlock_aes_128_ofb;
	    break;

	  case NID_aes_192_ecb:
	    *cipher = &padlock_aes_192_ecb;
	    break;
	  case NID_aes_192_cbc:
	    *cipher = &padlock_aes_192_cbc;
	    break;
	  case NID_aes_192_cfb:
	    *cipher = &padlock_aes_192_cfb;
	    break;
	  case NID_aes_192_ofb:
	    *cipher = &padlock_aes_192_ofb;
	    break;

	  case NID_aes_256_ecb:
	    *cipher = &padlock_aes_256_ecb;
	    break;
	  case NID_aes_256_cbc:
	    *cipher = &padlock_aes_256_cbc;
	    break;
	  case NID_aes_256_cfb:
	    *cipher = &padlock_aes_256_cfb;
	    break;
	  case NID_aes_256_ofb:
	    *cipher = &padlock_aes_256_ofb;
	    break;

	  default:
	    /* Sorry, we don't support this NID */
	    *cipher = NULL;
	    return 0;
	}

	return 1;
}

/* Prepare the encryption key for PadLock usage */
static int
padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
		      const unsigned char *iv, int enc)
{
	struct padlock_cipher_data *cdata;
	int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;

	if (key==NULL) return 0;	/* ERROR */

	cdata = ALIGNED_CIPHER_DATA(ctx);
	memset(cdata, 0, sizeof(struct padlock_cipher_data));

	/* Prepare Control word. */
	cdata->cword.b.encdec = (ctx->encrypt == 0);
	cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
	cdata->cword.b.ksize = (key_len - 128) / 64;
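	/* i.e. rounds = 10/12/14 and ksize = 0/1/2 for 128/192/256-bit keys */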

	switch(key_len) {
		case 128:
			/* PadLock can generate an extended key for
			   AES128 in hardware */
			memcpy (cdata->ks.rd_key, key, AES_KEY_SIZE_128);
			cdata->cword.b.keygen = 0;
			break;

		case 192:
		case 256:
			/* Generate an extended AES key in software.
			   Needed for AES192/AES256 */
			if (enc)
				AES_set_encrypt_key(key, key_len, &cdata->ks);
			else
				AES_set_decrypt_key(key, key_len, &cdata->ks);

			/* OpenSSL internal functions use byte-swapped extended key. */
			padlock_bswapl(cdata->ks.rd_key);

			cdata->cword.b.keygen = 1;
			break;

		default:	/* ERROR */
			return 0;
	}

	/*
	 * This is done to cover cases when the user reuses the
	 * context for a new key. The catch is that if we don't do
	 * this, padlock_aes_cipher might fail to reload the key...
	 */
	padlock_reload_key ();

	return 1;
}

#if 0 /* reserved for future deployment */
static int
padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
		   const unsigned char *in_arg, size_t nbytes)
{
	struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
	void  *iv;

	if (nbytes==0 || nbytes&(AES_BLOCK_SIZE-1))
		return 0; /* are we expected to do tail processing? */

	padlock_verify_context (cdata);

	switch (EVP_CIPHER_CTX_mode(ctx)) {
	case EVP_CIPH_ECB_MODE:
		padlock_xcrypt_ecb(nbytes/AES_BLOCK_SIZE,cdata,out_arg,in_arg);
		break;
	case EVP_CIPH_CBC_MODE:
		iv = cdata->iv;
		memcpy (iv, ctx->iv, AES_BLOCK_SIZE);
		iv = padlock_xcrypt_cbc(nbytes/AES_BLOCK_SIZE,cdata,out_arg,in_arg);
		memcpy (ctx->iv, iv, AES_BLOCK_SIZE);
		memset (cdata->iv,0,AES_BLOCK_SIZE);
		break;
	case EVP_CIPH_CFB_MODE:
		iv = cdata->iv;
		memcpy (iv, ctx->iv, AES_BLOCK_SIZE);
		iv = padlock_xcrypt_cfb(nbytes/AES_BLOCK_SIZE,cdata,out_arg,in_arg);
		memcpy (ctx->iv, iv, AES_BLOCK_SIZE);
		memset (cdata->iv,0,AES_BLOCK_SIZE);
		break;
	case EVP_CIPH_OFB_MODE:
		iv = cdata->iv;
		memcpy (iv, ctx->iv, AES_BLOCK_SIZE);
		padlock_xcrypt_ofb(nbytes/AES_BLOCK_SIZE,cdata,out_arg,in_arg);
		memcpy (ctx->iv, iv, AES_BLOCK_SIZE);
		memset (cdata->iv,0,AES_BLOCK_SIZE);
		break;
	default:	return 0;
	}

	return 1;
}
#endif

#define REALIGN_SIZE	4096	/* Must be a power of 2 larger than 16 */

/* Re-align the arguments to 16-byte boundaries and run the
   encryption function itself. This function is not AES-specific. */
static int
padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
		   const unsigned char *in_arg, size_t nbytes)
{
	char  bigbuf[REALIGN_SIZE + 16];

	struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
	const  void *inp;
	void  *out, *iv;
	int    inp_misaligned, out_misaligned, realign_in_loop = 0;
	size_t chunk=nbytes;

	if (nbytes==0 || nbytes&(AES_BLOCK_SIZE-1))
		return 0; /* are we expected to do tail processing? */

	inp_misaligned = (((size_t)in_arg) & 0x0F);
	out_misaligned = (((size_t)out_arg) & 0x0F);

	if ((realign_in_loop=out_misaligned))
		out = bigbuf + ((16 - ((size_t)bigbuf & 0x0F)) & 0x0F);
	else	out = out_arg,
		realign_in_loop = inp_misaligned;
		/* Note that even if output is aligned and input not,
		 * I still prefer to loop instead of copy the whole
		 * input and then encrypt in one stroke. This is done
		 * in order to improve L1 cache utilization... */ 

	padlock_verify_context (cdata);

	/* this takes one "if" out of the loops */
	if (realign_in_loop) {
		chunk &= REALIGN_SIZE-1;
		if (chunk==0) chunk = REALIGN_SIZE;
	}
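	/* The first chunk is therefore nbytes % REALIGN_SIZE (or a full
	   REALIGN_SIZE when nbytes is an exact multiple), so every later
	   iteration can use a fixed REALIGN_SIZE without re-checking how
	   much input remains. */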

	switch (EVP_CIPHER_CTX_mode(ctx)) {
	case EVP_CIPH_ECB_MODE:
		if (realign_in_loop) do	{
			if (inp_misaligned)
				inp = memcpy (out,in_arg,chunk);
			else	inp = in_arg;
			in_arg += chunk;

			padlock_xcrypt_ecb(chunk/AES_BLOCK_SIZE,cdata,out,inp);

			if (out_misaligned)
				memcpy (out_arg,out,chunk),
				out_arg += chunk;
			else	out_arg += chunk,
				out = out_arg;

			nbytes -= chunk;
			chunk   = REALIGN_SIZE;
		} while (nbytes);
		else	padlock_xcrypt_ecb(chunk/AES_BLOCK_SIZE,cdata,out_arg,in_arg);
		break;

	case EVP_CIPH_CBC_MODE:
		iv = cdata->iv;
		memcpy (iv, ctx->iv, AES_BLOCK_SIZE);
		if (realign_in_loop)		{
			goto cbc_shortcut; do	{
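			/* The first pass jumps straight to the label below:
			   cdata->iv is already loaded and chunk is already
			   sized, so small inputs skip the copy and reset
			   that only subsequent iterations need. */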
			if (iv != cdata->iv)
				memcpy (cdata->iv,iv,AES_BLOCK_SIZE);
			chunk = REALIGN_SIZE;
		cbc_shortcut: /* optimize for small input */
			if (inp_misaligned)
				inp = memcpy (out,in_arg,chunk);
			else	inp = in_arg;
			in_arg += chunk;

			iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE,cdata,out,inp);

			if (out_misaligned)
				memcpy (out_arg,out,chunk),
				out_arg += chunk;
			else	out_arg += chunk,
				out = out_arg;

		} while (nbytes -= chunk);	}
		else	iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE,cdata,out_arg,in_arg);
		memcpy (ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_CFB_MODE:
		iv = cdata->iv;
		memcpy (iv, ctx->iv, AES_BLOCK_SIZE);
		if (realign_in_loop)		{
			goto cfb_shortcut; do	{
			if (iv != cdata->iv)
				memcpy(cdata->iv,iv,AES_BLOCK_SIZE);
			chunk = REALIGN_SIZE;
		cfb_shortcut: /* optimize for small input */
			if (inp_misaligned)
				inp = memcpy (out,in_arg,chunk);
			else	inp = in_arg;
			in_arg += chunk;

			iv = padlock_xcrypt_cfb(chunk/AES_BLOCK_SIZE,cdata,out,inp);

			if (out_misaligned)
				memcpy (out_arg,out,chunk),
				out_arg += chunk;
			else	out_arg += chunk,
				out = out_arg;

		} while (nbytes -= chunk);	}
		else	iv = padlock_xcrypt_cfb(chunk/AES_BLOCK_SIZE,cdata,out_arg,in_arg);
		memcpy (ctx->iv, iv, AES_BLOCK_SIZE);
		break;

	case EVP_CIPH_OFB_MODE:
		iv = cdata->iv;
		memcpy (iv, ctx->iv, AES_BLOCK_SIZE);
		if (realign_in_loop) do	{
			if (inp_misaligned)
				inp = memcpy (out,in_arg,chunk);
			else	inp = in_arg;
			in_arg += chunk;

			padlock_xcrypt_ofb(chunk/AES_BLOCK_SIZE,cdata,out,inp);

			if (out_misaligned)
				memcpy (out_arg,out,chunk),
				out_arg += chunk;
			else	out_arg += chunk,
				out = out_arg;

			nbytes -= chunk;
			chunk   = REALIGN_SIZE;
		} while (nbytes);
		else	padlock_xcrypt_ofb(chunk/AES_BLOCK_SIZE,cdata,out_arg,in_arg);
		memcpy (ctx->iv, iv, AES_BLOCK_SIZE);
		break;
	default:	return 0;
	}

	/* Clean the realign buffer if it was used */
	if (realign_in_loop && out_misaligned) {
		volatile unsigned long *p=out;
		size_t   n=REALIGN_SIZE/sizeof(*p);
		while (n--) *p++=0;
	}
	memset (cdata->iv,0,AES_BLOCK_SIZE);

	return 1;
}

/* ===== Random Number Generator ===== */

/* Wrapper that provides an interface between the API and 
   the raw PadLock RNG */
static int
padlock_rand_bytes(unsigned char *output, int count)
{
	unsigned char *pptr = output;
	unsigned char buf[8];
	int orig_count = count;
	
	/* xstore always stores at least 4 bytes - we must avoid
	   overwriting innocent data! */
	if (count > 4) {
		/* 3 ... magic constant, see PadLock RNG docs */
		padlock_xstore(count - 4, 3, &pptr);
		count = 4;
	}
	if (count > 0) {
		pptr = buf;
		padlock_xstore(count, 3, &pptr);
		memcpy(output + orig_count - count, buf, count);
	}
	return 1;
}

/* Dummy but necessary function */
static int
padlock_rand_status(void)
{
	return 1;
}

/* Prepare structure for registration */
static RAND_METHOD padlock_rand = {
	NULL,			/* seed */
	padlock_rand_bytes,	/* bytes */
	NULL,			/* cleanup */
	NULL,			/* add */
	padlock_rand_bytes,	/* pseudorand */
	padlock_rand_status,	/* rand status */
};

#endif /* COMPILE_HW_PADLOCK */

#endif /* !OPENSSL_NO_HW_PADLOCK */
#endif /* !OPENSSL_NO_HW */
