[kernel] r15898 - in dists/sid/linux-2.6/debian: . patches/bugfix/arm patches/series

Martin Michlmayr tbm at alioth.debian.org
Sun Jun 20 13:31:13 UTC 2010


Author: tbm
Date: Sun Jun 20 13:31:03 2010
New Revision: 15898

Log:
Update the Marvell CESA (mv_cesa) driver (Closes: #585790)

Added:
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-dest-sglist-diff.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-exec-code-via-func-pointers.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-fix-compiler-warning.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-generic-async-requests.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-invoke-softirq-context.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-make-copy-back-optional.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-process-data-previous-requests.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-rename-variable.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-sha1-async-drivers.patch
   dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-src-sglist-more.patch
Modified:
   dists/sid/linux-2.6/debian/changelog
   dists/sid/linux-2.6/debian/patches/series/16

Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog	Sun Jun 20 00:18:16 2010	(r15897)
+++ dists/sid/linux-2.6/debian/changelog	Sun Jun 20 13:31:03 2010	(r15898)
@@ -52,6 +52,17 @@
   * [armel/kirkwood] Enable FB_XGI and FRAMEBUFFER_CONSOLE.
   * [armel] Make MOUSE_PS2 modular.
   * [armel] Build INPUT_UINPUT for all flavours.
+  * Update Marvell CESA (mv_cesa) driver (Closes: #585790):
+    - Invoke the user callback from a softirq context
+    - Remove compiler warning in mv_cesa driver
+    - Fix situation where the dest sglist is organized differently than the source sglist
+    - Fix situations where the src sglist spans more data than the request asks for
+    - Enqueue generic async requests
+    - Rename a variable to a more suitable name
+    - Execute some code via function pointers rather than direct calls
+    - Make the copy-back of data optional
+    - Support processing of data from previous requests
+    - Add sha1 and hmac(sha1) async hash drivers
 
   [ Bastian Blank ]
   * Disable mISDN support for NETJet cards. The driver binds a generic PCI

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-dest-sglist-diff.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-dest-sglist-diff.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,51 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:26:34 +0000 (+0300)
+Subject: crypto: mv_cesa - Fix situation where the dest sglist is organized differently than... 
+X-Git-Tag: v2.6.35-rc1~446^2~36
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=f565e67ec1b8f4a95d21550f9b879fe86b4132e0
+
+crypto: mv_cesa - Fix situation where the dest sglist is organized differently than the source sglist
+
+Bugfix for situations where the destination scatterlist has a different
+buffer structure than the source scatterlist (e.g. source has one 2K
+buffer and dest has 2 1K buffers)
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 37d9f06..018a95c 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -242,6 +242,8 @@ static void dequeue_complete_req(void)
+ 	struct ablkcipher_request *req = cpg->cur_req;
+ 	void *buf;
+ 	int ret;
++	int need_copy_len = cpg->p.crypt_len;
++	int sram_offset = 0;
+ 
+ 	cpg->p.total_req_bytes += cpg->p.crypt_len;
+ 	do {
+@@ -257,14 +259,16 @@ static void dequeue_complete_req(void)
+ 		buf = cpg->p.dst_sg_it.addr;
+ 		buf += cpg->p.dst_start;
+ 
+-		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+-
+-		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
++		dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
+ 
++		memcpy(buf,
++		       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
++		       dst_copy);
++		sram_offset += dst_copy;
+ 		cpg->p.sg_dst_left -= dst_copy;
+-		cpg->p.crypt_len -= dst_copy;
++		need_copy_len -= dst_copy;
+ 		cpg->p.dst_start += dst_copy;
+-	} while (cpg->p.crypt_len > 0);
++	} while (need_copy_len > 0);
+ 
+ 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ 	if (cpg->p.total_req_bytes < req->nbytes) {
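
The following standalone userspace C sketch illustrates the copy-back pattern this patch introduces: one contiguous output buffer (standing in for the engine's SRAM) is copied into a destination split across smaller segments, tracking need_copy_len and an SRAM offset separately. Buffer names and sizes are made up for illustration; this is not the driver code.

/*
 * Copy one processed chunk from a contiguous "SRAM" output buffer into a
 * destination made of several smaller segments (e.g. a 2K source copied
 * into two 1K destination buffers).
 */
#include <stdio.h>
#include <string.h>

struct dst_seg { unsigned char *buf; size_t len; };

static void copy_back(const unsigned char *sram, size_t chunk_len,
                      struct dst_seg *dst, size_t nsegs)
{
    size_t need_copy_len = chunk_len;   /* bytes still to copy back */
    size_t sram_offset = 0;             /* current position in SRAM */
    size_t seg = 0, seg_off = 0;

    while (need_copy_len > 0 && seg < nsegs) {
        size_t space = dst[seg].len - seg_off;
        size_t dst_copy = need_copy_len < space ? need_copy_len : space;

        memcpy(dst[seg].buf + seg_off, sram + sram_offset, dst_copy);
        sram_offset += dst_copy;
        need_copy_len -= dst_copy;
        seg_off += dst_copy;
        if (seg_off == dst[seg].len) {  /* destination segment full, advance */
            seg++;
            seg_off = 0;
        }
    }
}

int main(void)
{
    unsigned char sram[2048];
    unsigned char d0[1024], d1[1024];
    struct dst_seg dst[] = { { d0, sizeof(d0) }, { d1, sizeof(d1) } };

    memset(sram, 0xab, sizeof(sram));
    copy_back(sram, sizeof(sram), dst, 2);      /* 2K source, 2 x 1K dest */
    printf("last byte: 0x%02x\n", d1[sizeof(d1) - 1]);
    return 0;
}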

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-exec-code-via-func-pointers.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-exec-code-via-func-pointers.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,61 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:30:19 +0000 (+0300)
+Subject: crypto: mv_cesa - Execute some code via function pointers rathr than direct calls
+X-Git-Tag: v2.6.35-rc1~446^2~32
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=a58094ac5f95d6969e5c52ff096d2fd2864542af
+
+crypto: mv_cesa - Execute some code via function pointers rathr than direct calls
+
+Execute some code via function pointers rathr than direct calls
+(to allow customization in the hashing request)
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 4262932..2b4f07a 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -51,6 +51,8 @@ enum engine_status {
+ struct req_progress {
+ 	struct sg_mapping_iter src_sg_it;
+ 	struct sg_mapping_iter dst_sg_it;
++	void (*complete) (void);
++	void (*process) (int is_first);
+ 
+ 	/* src mostly */
+ 	int sg_src_left;
+@@ -251,6 +253,9 @@ static void mv_crypto_algo_completion(void)
+ 	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
+ 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ 
++	sg_miter_stop(&cpg->p.src_sg_it);
++	sg_miter_stop(&cpg->p.dst_sg_it);
++
+ 	if (req_ctx->op != COP_AES_CBC)
+ 		return ;
+ 
+@@ -294,11 +299,9 @@ static void dequeue_complete_req(void)
+ 	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
+ 		/* process next scatter list entry */
+ 		cpg->eng_st = ENGINE_BUSY;
+-		mv_process_current_q(0);
++		cpg->p.process(0);
+ 	} else {
+-		sg_miter_stop(&cpg->p.src_sg_it);
+-		sg_miter_stop(&cpg->p.dst_sg_it);
+-		mv_crypto_algo_completion();
++		cpg->p.complete();
+ 		cpg->eng_st = ENGINE_IDLE;
+ 		local_bh_disable();
+ 		req->complete(req, 0);
+@@ -331,6 +334,8 @@ static void mv_enqueue_new_req(struct ablkcipher_request *req)
+ 	cpg->cur_req = &req->base;
+ 	memset(p, 0, sizeof(struct req_progress));
+ 	p->hw_nbytes = req->nbytes;
++	p->complete = mv_crypto_algo_completion;
++	p->process = mv_process_current_q;
+ 
+ 	num_sgs = count_sgs(req->src, req->nbytes);
+ 	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
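
A minimal standalone C sketch of the dispatch pattern this patch introduces: the per-request progress state carries process/complete function pointers so one dequeue loop can drive different request types. The names and the 512-byte chunk size are illustrative only, not the driver code.

#include <stdio.h>

struct req_progress {
    void (*process)(int is_first);  /* advance one hw chunk        */
    void (*complete)(void);         /* finish the current request  */
    int hw_processed_bytes;
    int hw_nbytes;
};

static struct req_progress p;

static void crypt_process(int is_first)
{
    p.hw_processed_bytes += 512;    /* pretend one chunk was processed */
    printf("crypt chunk (first=%d), %d/%d bytes\n",
           is_first, p.hw_processed_bytes, p.hw_nbytes);
}

static void crypt_complete(void)
{
    printf("crypt request done\n");
}

int main(void)
{
    p.process = crypt_process;      /* set when the request is started */
    p.complete = crypt_complete;
    p.hw_nbytes = 1024;

    p.process(1);                   /* first block */
    while (p.hw_processed_bytes < p.hw_nbytes)
        p.process(0);               /* next scatterlist entry */
    p.complete();
    return 0;
}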

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-fix-compiler-warning.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-fix-compiler-warning.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,26 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:25:56 +0000 (+0300)
+Subject: crypto: mv_cesa - Remove compiler warning in mv_cesa driver
+X-Git-Tag: v2.6.35-rc1~446^2~37
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=6bc6fcd609080461682c5cc0a1e3bf4345d6419d
+
+crypto: mv_cesa - Remove compiler warning in mv_cesa driver
+
+Remove compiler warning
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 3e60ba9..37d9f06 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -178,6 +178,7 @@ static void mv_process_current_q(int first_block)
+ 		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+ 		break;
+ 	case COP_AES_CBC:
++	default:
+ 		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+ 		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+ 			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
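
A standalone C sketch of why adding the default: label helps: without it the compiler cannot see that the config word is assigned on every path through the switch and may warn about a possibly uninitialized use. This is an illustration with made-up values, not the driver code, and the exact warning text the original build produced is assumed rather than quoted.

#include <stdio.h>

enum mode { MODE_ECB, MODE_CBC };

static unsigned int pick_config(enum mode m)
{
    unsigned int config;

    switch (m) {
    case MODE_ECB:
        config = 0x1;
        break;
    case MODE_CBC:
    default:        /* covers any other value, so config is always set */
        config = 0x2;
        break;
    }
    return config;
}

int main(void)
{
    printf("config=0x%x\n", pick_config(MODE_CBC));
    return 0;
}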

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-generic-async-requests.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-generic-async-requests.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,187 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:27:33 +0000 (+0300)
+Subject: crypto: mv_cesa - Enqueue generic async requests
+X-Git-Tag: v2.6.35-rc1~446^2~34
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3b61a90502481045f56c1c41a2af35ee48ca8b80
+
+crypto: mv_cesa - Enqueue generic async requests
+
+Enqueue generic async requests rather than ablkcipher requests
+in the driver's queue
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 096f9ff..8891e2e 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -39,6 +39,7 @@ enum engine_status {
+  * @sg_src_left:	bytes left in src to process (scatter list)
+  * @src_start:		offset to add to src start position (scatter list)
+  * @crypt_len:		length of current crypt process
++ * @hw_nbytes:		total bytes to process in hw for this request
+  * @sg_dst_left:	bytes left dst to process in this scatter list
+  * @dst_start:		offset to add to dst start position (scatter list)
+  * @total_req_bytes:	total number of bytes processed (request).
+@@ -55,6 +56,7 @@ struct req_progress {
+ 	int sg_src_left;
+ 	int src_start;
+ 	int crypt_len;
++	int hw_nbytes;
+ 	/* dst mostly */
+ 	int sg_dst_left;
+ 	int dst_start;
+@@ -71,7 +73,7 @@ struct crypto_priv {
+ 	spinlock_t lock;
+ 	struct crypto_queue queue;
+ 	enum engine_status eng_st;
+-	struct ablkcipher_request *cur_req;
++	struct crypto_async_request *cur_req;
+ 	struct req_progress p;
+ 	int max_req_size;
+ 	int sram_size;
+@@ -175,18 +177,18 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
+ 	}
+ }
+ 
+-static void setup_data_in(struct ablkcipher_request *req)
++static void setup_data_in(void)
+ {
+ 	struct req_progress *p = &cpg->p;
+ 	p->crypt_len =
+-	    min((int)req->nbytes - p->total_req_bytes, cpg->max_req_size);
++	    min(p->hw_nbytes - p->total_req_bytes, cpg->max_req_size);
+ 	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
+ 			p->crypt_len);
+ }
+ 
+ static void mv_process_current_q(int first_block)
+ {
+-	struct ablkcipher_request *req = cpg->cur_req;
++	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
+ 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ 	struct sec_accel_config op;
+@@ -229,7 +231,7 @@ static void mv_process_current_q(int first_block)
+ 		ENC_P_DST(SRAM_DATA_OUT_START);
+ 	op.enc_key_p = SRAM_DATA_KEY_P;
+ 
+-	setup_data_in(req);
++	setup_data_in();
+ 	op.enc_len = cpg->p.crypt_len;
+ 	memcpy(cpg->sram + SRAM_CONFIG, &op,
+ 			sizeof(struct sec_accel_config));
+@@ -246,7 +248,7 @@ static void mv_process_current_q(int first_block)
+ 
+ static void mv_crypto_algo_completion(void)
+ {
+-	struct ablkcipher_request *req = cpg->cur_req;
++	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
+ 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ 
+ 	if (req_ctx->op != COP_AES_CBC)
+@@ -257,7 +259,7 @@ static void mv_crypto_algo_completion(void)
+ 
+ static void dequeue_complete_req(void)
+ {
+-	struct ablkcipher_request *req = cpg->cur_req;
++	struct crypto_async_request *req = cpg->cur_req;
+ 	void *buf;
+ 	int ret;
+ 	int need_copy_len = cpg->p.crypt_len;
+@@ -289,7 +291,7 @@ static void dequeue_complete_req(void)
+ 	} while (need_copy_len > 0);
+ 
+ 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+-	if (cpg->p.total_req_bytes < req->nbytes) {
++	if (cpg->p.total_req_bytes < cpg->p.hw_nbytes) {
+ 		/* process next scatter list entry */
+ 		cpg->eng_st = ENGINE_BUSY;
+ 		mv_process_current_q(0);
+@@ -299,7 +301,7 @@ static void dequeue_complete_req(void)
+ 		mv_crypto_algo_completion();
+ 		cpg->eng_st = ENGINE_IDLE;
+ 		local_bh_disable();
+-		req->base.complete(&req->base, 0);
++		req->complete(req, 0);
+ 		local_bh_enable();
+ 	}
+ }
+@@ -323,16 +325,19 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+ 
+ static void mv_enqueue_new_req(struct ablkcipher_request *req)
+ {
++	struct req_progress *p = &cpg->p;
+ 	int num_sgs;
+ 
+-	cpg->cur_req = req;
+-	memset(&cpg->p, 0, sizeof(struct req_progress));
++	cpg->cur_req = &req->base;
++	memset(p, 0, sizeof(struct req_progress));
++	p->hw_nbytes = req->nbytes;
+ 
+ 	num_sgs = count_sgs(req->src, req->nbytes);
+-	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
++	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+ 
+ 	num_sgs = count_sgs(req->dst, req->nbytes);
+-	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
++	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
++
+ 	mv_process_current_q(1);
+ }
+ 
+@@ -378,13 +383,13 @@ static int queue_manag(void *data)
+ 	return 0;
+ }
+ 
+-static int mv_handle_req(struct ablkcipher_request *req)
++static int mv_handle_req(struct crypto_async_request *req)
+ {
+ 	unsigned long flags;
+ 	int ret;
+ 
+ 	spin_lock_irqsave(&cpg->lock, flags);
+-	ret = ablkcipher_enqueue_request(&cpg->queue, req);
++	ret = crypto_enqueue_request(&cpg->queue, req);
+ 	spin_unlock_irqrestore(&cpg->lock, flags);
+ 	wake_up_process(cpg->queue_th);
+ 	return ret;
+@@ -397,7 +402,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+ 	req_ctx->op = COP_AES_ECB;
+ 	req_ctx->decrypt = 0;
+ 
+-	return mv_handle_req(req);
++	return mv_handle_req(&req->base);
+ }
+ 
+ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+@@ -409,7 +414,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+ 	req_ctx->decrypt = 1;
+ 
+ 	compute_aes_dec_key(ctx);
+-	return mv_handle_req(req);
++	return mv_handle_req(&req->base);
+ }
+ 
+ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+@@ -419,7 +424,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+ 	req_ctx->op = COP_AES_CBC;
+ 	req_ctx->decrypt = 0;
+ 
+-	return mv_handle_req(req);
++	return mv_handle_req(&req->base);
+ }
+ 
+ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+@@ -431,7 +436,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+ 	req_ctx->decrypt = 1;
+ 
+ 	compute_aes_dec_key(ctx);
+-	return mv_handle_req(req);
++	return mv_handle_req(&req->base);
+ }
+ 
+ static int mv_cra_init(struct crypto_tfm *tfm)
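
A standalone C sketch of the generic-queue pattern this patch moves to: the queue stores only a generic base object and the handler recovers the specific request with container_of(). The structures here are simplified stand-ins, not the kernel's crypto types.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct async_request { int id; };                  /* generic base        */
struct cipher_request { struct async_request base; int nbytes; };

static void handle(struct async_request *req)      /* queue sees the base */
{
    struct cipher_request *creq =
        container_of(req, struct cipher_request, base);
    printf("request %d, %d bytes\n", req->id, creq->nbytes);
}

int main(void)
{
    struct cipher_request creq = { .base = { .id = 1 }, .nbytes = 4096 };

    handle(&creq.base);                            /* only &base is queued */
    return 0;
}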

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-invoke-softirq-context.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-invoke-softirq-context.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,28 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:25:37 +0000 (+0300)
+Subject: crypto: mv_cesa - Invoke the user callback from a softirq context
+X-Git-Tag: v2.6.35-rc1~446^2~38
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=0328ac267564089d9cedfb568f936d30a6debd21
+
+crypto: mv_cesa - Invoke the user callback from a softirq context
+
+Invoke the user callback from a softirq context
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index b21ef63..3e60ba9 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -275,7 +275,9 @@ static void dequeue_complete_req(void)
+ 		sg_miter_stop(&cpg->p.dst_sg_it);
+ 		mv_crypto_algo_completion();
+ 		cpg->eng_st = ENGINE_IDLE;
++		local_bh_disable();
+ 		req->base.complete(&req->base, 0);
++		local_bh_enable();
+ 	}
+ }
+ 

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-make-copy-back-optional.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-make-copy-back-optional.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,100 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:31:48 +0000 (+0300)
+Subject: crypto: mv_cesa - Make the copy-back of data optional
+X-Git-Tag: v2.6.35-rc1~446^2~31
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=f0d03deaad05d9cc99cd2ee0475c9ecd726c19ae
+
+crypto: mv_cesa - Make the copy-back of data optional
+
+Make the copy-back of data optional (not done in hashing requests)
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 2b4f07a..49a2206 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -40,6 +40,7 @@ enum engine_status {
+  * @src_start:		offset to add to src start position (scatter list)
+  * @crypt_len:		length of current crypt process
+  * @hw_nbytes:		total bytes to process in hw for this request
++ * @copy_back:		whether to copy data back (crypt) or not (hash)
+  * @sg_dst_left:	bytes left dst to process in this scatter list
+  * @dst_start:		offset to add to dst start position (scatter list)
+  * @hw_processed_bytes:	number of bytes processed by hw (request).
+@@ -60,6 +61,7 @@ struct req_progress {
+ 	int crypt_len;
+ 	int hw_nbytes;
+ 	/* dst mostly */
++	int copy_back;
+ 	int sg_dst_left;
+ 	int dst_start;
+ 	int hw_processed_bytes;
+@@ -267,33 +269,35 @@ static void dequeue_complete_req(void)
+ 	struct crypto_async_request *req = cpg->cur_req;
+ 	void *buf;
+ 	int ret;
+-	int need_copy_len = cpg->p.crypt_len;
+-	int sram_offset = 0;
+-
+ 	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+-	do {
+-		int dst_copy;
++	if (cpg->p.copy_back) {
++		int need_copy_len = cpg->p.crypt_len;
++		int sram_offset = 0;
++		do {
++			int dst_copy;
++
++			if (!cpg->p.sg_dst_left) {
++				ret = sg_miter_next(&cpg->p.dst_sg_it);
++				BUG_ON(!ret);
++				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
++				cpg->p.dst_start = 0;
++			}
+ 
+-		if (!cpg->p.sg_dst_left) {
+-			ret = sg_miter_next(&cpg->p.dst_sg_it);
+-			BUG_ON(!ret);
+-			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+-			cpg->p.dst_start = 0;
+-		}
++			buf = cpg->p.dst_sg_it.addr;
++			buf += cpg->p.dst_start;
+ 
+-		buf = cpg->p.dst_sg_it.addr;
+-		buf += cpg->p.dst_start;
++			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
+ 
+-		dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
++			memcpy(buf,
++			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
++			       dst_copy);
++			sram_offset += dst_copy;
++			cpg->p.sg_dst_left -= dst_copy;
++			need_copy_len -= dst_copy;
++			cpg->p.dst_start += dst_copy;
++		} while (need_copy_len > 0);
++	}
+ 
+-		memcpy(buf,
+-		       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+-		       dst_copy);
+-		sram_offset += dst_copy;
+-		cpg->p.sg_dst_left -= dst_copy;
+-		need_copy_len -= dst_copy;
+-		cpg->p.dst_start += dst_copy;
+-	} while (need_copy_len > 0);
+ 
+ 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ 	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
+@@ -336,6 +340,7 @@ static void mv_enqueue_new_req(struct ablkcipher_request *req)
+ 	p->hw_nbytes = req->nbytes;
+ 	p->complete = mv_crypto_algo_completion;
+ 	p->process = mv_process_current_q;
++	p->copy_back = 1;
+ 
+ 	num_sgs = count_sgs(req->src, req->nbytes);
+ 	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-process-data-previous-requests.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-process-data-previous-requests.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,42 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:33:26 +0000 (+0300)
+Subject: crypto: mv_cesa - Support processing of data from previous requests
+X-Git-Tag: v2.6.35-rc1~446^2~30
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=0c5c6c4bae8fe9ae3d86b44c332eb1267df1ec99
+
+crypto: mv_cesa - Support processing of data from previous requests
+
+Support processing of data from previous requests (as in hashing
+update/final requests).
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 49a2206..d0fb10e 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -184,10 +184,11 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
+ static void setup_data_in(void)
+ {
+ 	struct req_progress *p = &cpg->p;
+-	p->crypt_len =
++	int data_in_sram =
+ 	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+-	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
+-			p->crypt_len);
++	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
++			data_in_sram - p->crypt_len);
++	p->crypt_len = data_in_sram;
+ }
+ 
+ static void mv_process_current_q(int first_block)
+@@ -298,6 +299,7 @@ static void dequeue_complete_req(void)
+ 		} while (need_copy_len > 0);
+ 	}
+ 
++	cpg->p.crypt_len = 0;
+ 
+ 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ 	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
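
A standalone C sketch of the setup_data_in() change: new source data is appended after the bytes already staged from a previous request (tracked by crypt_len) instead of overwriting them. Buffer contents and sizes are invented for illustration; this is not the driver code.

#include <stdio.h>
#include <string.h>

#define SRAM_SIZE 64

int main(void)
{
    char sram[SRAM_SIZE];
    int crypt_len = 5;                     /* bytes carried over in SRAM   */
    const char *new_data = "NEWDATA";
    int data_in_sram = crypt_len + 7;      /* total staged after this call */

    memcpy(sram, "CARRY", crypt_len);      /* leftover from previous req   */
    memcpy(sram + crypt_len, new_data,     /* append rather than overwrite */
           data_in_sram - crypt_len);
    crypt_len = data_in_sram;

    printf("%.*s\n", crypt_len, sram);     /* prints CARRYNEWDATA          */
    return 0;
}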

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-rename-variable.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-rename-variable.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,63 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:29:16 +0000 (+0300)
+Subject: crypto: mv_cesa - Rename a variable to a more suitable name
+X-Git-Tag: v2.6.35-rc1~446^2~33
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=7a5f691ef03f4c01d2703b5ec4ddd4c17e645dec
+
+crypto: mv_cesa - Rename a variable to a more suitable name
+
+Rename a variable to a more suitable name
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 8891e2e..4262932 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -42,7 +42,7 @@ enum engine_status {
+  * @hw_nbytes:		total bytes to process in hw for this request
+  * @sg_dst_left:	bytes left dst to process in this scatter list
+  * @dst_start:		offset to add to dst start position (scatter list)
+- * @total_req_bytes:	total number of bytes processed (request).
++ * @hw_processed_bytes:	number of bytes processed by hw (request).
+  *
+  * sg helper are used to iterate over the scatterlist. Since the size of the
+  * SRAM may be less than the scatter size, this struct struct is used to keep
+@@ -60,7 +60,7 @@ struct req_progress {
+ 	/* dst mostly */
+ 	int sg_dst_left;
+ 	int dst_start;
+-	int total_req_bytes;
++	int hw_processed_bytes;
+ };
+ 
+ struct crypto_priv {
+@@ -181,7 +181,7 @@ static void setup_data_in(void)
+ {
+ 	struct req_progress *p = &cpg->p;
+ 	p->crypt_len =
+-	    min(p->hw_nbytes - p->total_req_bytes, cpg->max_req_size);
++	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+ 	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
+ 			p->crypt_len);
+ }
+@@ -265,7 +265,7 @@ static void dequeue_complete_req(void)
+ 	int need_copy_len = cpg->p.crypt_len;
+ 	int sram_offset = 0;
+ 
+-	cpg->p.total_req_bytes += cpg->p.crypt_len;
++	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+ 	do {
+ 		int dst_copy;
+ 
+@@ -291,7 +291,7 @@ static void dequeue_complete_req(void)
+ 	} while (need_copy_len > 0);
+ 
+ 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+-	if (cpg->p.total_req_bytes < cpg->p.hw_nbytes) {
++	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
+ 		/* process next scatter list entry */
+ 		cpg->eng_st = ENGINE_BUSY;
+ 		mv_process_current_q(0);

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-sha1-async-drivers.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-sha1-async-drivers.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,710 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:34:55 +0000 (+0300)
+Subject: crypto: mv_cesa - Add sha1 and hmac(sha1) async hash drivers
+X-Git-Tag: v2.6.35-rc1~446^2~29
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=750052dd2400cd09e0864d75b63c2c0bf605056f
+
+crypto: mv_cesa - Add sha1 and hmac(sha1) async hash drivers
+
+Add sha1 and hmac(sha1) async hash drivers
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index d0fb10e..1cee5a9 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -14,8 +14,14 @@
+ #include <linux/kthread.h>
+ #include <linux/platform_device.h>
+ #include <linux/scatterlist.h>
++#include <crypto/internal/hash.h>
++#include <crypto/sha.h>
+ 
+ #include "mv_cesa.h"
++
++#define MV_CESA	"MV-CESA:"
++#define MAX_HW_HASH_SIZE	0xFFFF
++
+ /*
+  * STM:
+  *   /---------------------------------------\
+@@ -38,7 +44,7 @@ enum engine_status {
+  * @dst_sg_it:		sg iterator for dst
+  * @sg_src_left:	bytes left in src to process (scatter list)
+  * @src_start:		offset to add to src start position (scatter list)
+- * @crypt_len:		length of current crypt process
++ * @crypt_len:		length of current hw crypt/hash process
+  * @hw_nbytes:		total bytes to process in hw for this request
+  * @copy_back:		whether to copy data back (crypt) or not (hash)
+  * @sg_dst_left:	bytes left dst to process in this scatter list
+@@ -81,6 +87,8 @@ struct crypto_priv {
+ 	struct req_progress p;
+ 	int max_req_size;
+ 	int sram_size;
++	int has_sha1;
++	int has_hmac_sha1;
+ };
+ 
+ static struct crypto_priv *cpg;
+@@ -102,6 +110,31 @@ struct mv_req_ctx {
+ 	int decrypt;
+ };
+ 
++enum hash_op {
++	COP_SHA1,
++	COP_HMAC_SHA1
++};
++
++struct mv_tfm_hash_ctx {
++	struct crypto_shash *fallback;
++	struct crypto_shash *base_hash;
++	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
++	int count_add;
++	enum hash_op op;
++};
++
++struct mv_req_hash_ctx {
++	u64 count;
++	u32 state[SHA1_DIGEST_SIZE / 4];
++	u8 buffer[SHA1_BLOCK_SIZE];
++	int first_hash;		/* marks that we don't have previous state */
++	int last_chunk;		/* marks that this is the 'final' request */
++	int extra_bytes;	/* unprocessed bytes in buffer */
++	enum hash_op op;
++	int count_add;
++	struct scatterlist dummysg;
++};
++
+ static void compute_aes_dec_key(struct mv_ctx *ctx)
+ {
+ 	struct crypto_aes_ctx gen_aes_key;
+@@ -265,6 +298,132 @@ static void mv_crypto_algo_completion(void)
+ 	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+ }
+ 
++static void mv_process_hash_current(int first_block)
++{
++	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
++	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
++	struct req_progress *p = &cpg->p;
++	struct sec_accel_config op = { 0 };
++	int is_last;
++
++	switch (req_ctx->op) {
++	case COP_SHA1:
++	default:
++		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
++		break;
++	case COP_HMAC_SHA1:
++		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
++		break;
++	}
++
++	op.mac_src_p =
++		MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
++		req_ctx->
++		count);
++
++	setup_data_in();
++
++	op.mac_digest =
++		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
++	op.mac_iv =
++		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
++		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
++
++	is_last = req_ctx->last_chunk
++		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
++		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
++	if (req_ctx->first_hash) {
++		if (is_last)
++			op.config |= CFG_NOT_FRAG;
++		else
++			op.config |= CFG_FIRST_FRAG;
++
++		req_ctx->first_hash = 0;
++	} else {
++		if (is_last)
++			op.config |= CFG_LAST_FRAG;
++		else
++			op.config |= CFG_MID_FRAG;
++	}
++
++	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
++
++	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
++	/* GO */
++	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
++
++	/*
++	* XXX: add timer if the interrupt does not occur for some mystery
++	* reason
++	*/
++}
++
++static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
++					  struct shash_desc *desc)
++{
++	int i;
++	struct sha1_state shash_state;
++
++	shash_state.count = ctx->count + ctx->count_add;
++	for (i = 0; i < 5; i++)
++		shash_state.state[i] = ctx->state[i];
++	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
++	return crypto_shash_import(desc, &shash_state);
++}
++
++static int mv_hash_final_fallback(struct ahash_request *req)
++{
++	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
++	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
++	struct {
++		struct shash_desc shash;
++		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
++	} desc;
++	int rc;
++
++	desc.shash.tfm = tfm_ctx->fallback;
++	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
++	if (unlikely(req_ctx->first_hash)) {
++		crypto_shash_init(&desc.shash);
++		crypto_shash_update(&desc.shash, req_ctx->buffer,
++				    req_ctx->extra_bytes);
++	} else {
++		/* only SHA1 for now....
++		 */
++		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
++		if (rc)
++			goto out;
++	}
++	rc = crypto_shash_final(&desc.shash, req->result);
++out:
++	return rc;
++}
++
++static void mv_hash_algo_completion(void)
++{
++	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
++	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
++
++	if (ctx->extra_bytes)
++		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
++	sg_miter_stop(&cpg->p.src_sg_it);
++
++	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
++	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
++	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
++	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
++	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
++
++	if (likely(ctx->last_chunk)) {
++		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
++			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
++			       crypto_ahash_digestsize(crypto_ahash_reqtfm
++						       (req)));
++		} else
++			mv_hash_final_fallback(req);
++	}
++}
++
+ static void dequeue_complete_req(void)
+ {
+ 	struct crypto_async_request *req = cpg->cur_req;
+@@ -332,7 +491,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+ 	return i;
+ }
+ 
+-static void mv_enqueue_new_req(struct ablkcipher_request *req)
++static void mv_start_new_crypt_req(struct ablkcipher_request *req)
+ {
+ 	struct req_progress *p = &cpg->p;
+ 	int num_sgs;
+@@ -353,11 +512,68 @@ static void mv_enqueue_new_req(struct ablkcipher_request *req)
+ 	mv_process_current_q(1);
+ }
+ 
++static void mv_start_new_hash_req(struct ahash_request *req)
++{
++	struct req_progress *p = &cpg->p;
++	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
++	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
++	int num_sgs, hw_bytes, old_extra_bytes, rc;
++	cpg->cur_req = &req->base;
++	memset(p, 0, sizeof(struct req_progress));
++	hw_bytes = req->nbytes + ctx->extra_bytes;
++	old_extra_bytes = ctx->extra_bytes;
++
++	if (unlikely(ctx->extra_bytes)) {
++		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
++		       ctx->extra_bytes);
++		p->crypt_len = ctx->extra_bytes;
++	}
++
++	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
++
++	if (unlikely(!ctx->first_hash)) {
++		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
++		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
++		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
++		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
++		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
++	}
++
++	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
++	if (ctx->extra_bytes != 0
++	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
++		hw_bytes -= ctx->extra_bytes;
++	else
++		ctx->extra_bytes = 0;
++
++	num_sgs = count_sgs(req->src, req->nbytes);
++	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
++
++	if (hw_bytes) {
++		p->hw_nbytes = hw_bytes;
++		p->complete = mv_hash_algo_completion;
++		p->process = mv_process_hash_current;
++
++		mv_process_hash_current(1);
++	} else {
++		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
++				ctx->extra_bytes - old_extra_bytes);
++		sg_miter_stop(&p->src_sg_it);
++		if (ctx->last_chunk)
++			rc = mv_hash_final_fallback(req);
++		else
++			rc = 0;
++		cpg->eng_st = ENGINE_IDLE;
++		local_bh_disable();
++		req->base.complete(&req->base, rc);
++		local_bh_enable();
++	}
++}
++
+ static int queue_manag(void *data)
+ {
+ 	cpg->eng_st = ENGINE_IDLE;
+ 	do {
+-		struct ablkcipher_request *req;
+ 		struct crypto_async_request *async_req = NULL;
+ 		struct crypto_async_request *backlog;
+ 
+@@ -383,9 +599,18 @@ static int queue_manag(void *data)
+ 		}
+ 
+ 		if (async_req) {
+-			req = container_of(async_req,
+-					struct ablkcipher_request, base);
+-			mv_enqueue_new_req(req);
++			if (async_req->tfm->__crt_alg->cra_type !=
++			    &crypto_ahash_type) {
++				struct ablkcipher_request *req =
++				    container_of(async_req,
++						 struct ablkcipher_request,
++						 base);
++				mv_start_new_crypt_req(req);
++			} else {
++				struct ahash_request *req =
++				    ahash_request_cast(async_req);
++				mv_start_new_hash_req(req);
++			}
+ 			async_req = NULL;
+ 		}
+ 
+@@ -457,6 +682,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
+ 	return 0;
+ }
+ 
++static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
++				 int is_last, unsigned int req_len,
++				 int count_add)
++{
++	memset(ctx, 0, sizeof(*ctx));
++	ctx->op = op;
++	ctx->count = req_len;
++	ctx->first_hash = 1;
++	ctx->last_chunk = is_last;
++	ctx->count_add = count_add;
++}
++
++static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
++				   unsigned req_len)
++{
++	ctx->last_chunk = is_last;
++	ctx->count += req_len;
++}
++
++static int mv_hash_init(struct ahash_request *req)
++{
++	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
++	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
++			     tfm_ctx->count_add);
++	return 0;
++}
++
++static int mv_hash_update(struct ahash_request *req)
++{
++	if (!req->nbytes)
++		return 0;
++
++	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
++	return mv_handle_req(&req->base);
++}
++
++static int mv_hash_final(struct ahash_request *req)
++{
++	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
++	/* dummy buffer of 4 bytes */
++	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
++	/* I think I'm allowed to do that... */
++	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
++	mv_update_hash_req_ctx(ctx, 1, 0);
++	return mv_handle_req(&req->base);
++}
++
++static int mv_hash_finup(struct ahash_request *req)
++{
++	if (!req->nbytes)
++		return mv_hash_final(req);
++
++	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
++	return mv_handle_req(&req->base);
++}
++
++static int mv_hash_digest(struct ahash_request *req)
++{
++	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
++	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
++			     req->nbytes, tfm_ctx->count_add);
++	return mv_handle_req(&req->base);
++}
++
++static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
++			     const void *ostate)
++{
++	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
++	int i;
++	for (i = 0; i < 5; i++) {
++		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
++		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
++	}
++}
++
++static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
++			  unsigned int keylen)
++{
++	int rc;
++	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
++	int bs, ds, ss;
++
++	if (!ctx->base_hash)
++		return 0;
++
++	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
++	if (rc)
++		return rc;
++
++	/* Can't see a way to extract the ipad/opad from the fallback tfm
++	   so I'm basically copying code from the hmac module */
++	bs = crypto_shash_blocksize(ctx->base_hash);
++	ds = crypto_shash_digestsize(ctx->base_hash);
++	ss = crypto_shash_statesize(ctx->base_hash);
++
++	{
++		struct {
++			struct shash_desc shash;
++			char ctx[crypto_shash_descsize(ctx->base_hash)];
++		} desc;
++		unsigned int i;
++		char ipad[ss];
++		char opad[ss];
++
++		desc.shash.tfm = ctx->base_hash;
++		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
++		    CRYPTO_TFM_REQ_MAY_SLEEP;
++
++		if (keylen > bs) {
++			int err;
++
++			err =
++			    crypto_shash_digest(&desc.shash, key, keylen, ipad);
++			if (err)
++				return err;
++
++			keylen = ds;
++		} else
++			memcpy(ipad, key, keylen);
++
++		memset(ipad + keylen, 0, bs - keylen);
++		memcpy(opad, ipad, bs);
++
++		for (i = 0; i < bs; i++) {
++			ipad[i] ^= 0x36;
++			opad[i] ^= 0x5c;
++		}
++
++		rc = crypto_shash_init(&desc.shash) ? :
++		    crypto_shash_update(&desc.shash, ipad, bs) ? :
++		    crypto_shash_export(&desc.shash, ipad) ? :
++		    crypto_shash_init(&desc.shash) ? :
++		    crypto_shash_update(&desc.shash, opad, bs) ? :
++		    crypto_shash_export(&desc.shash, opad);
++
++		if (rc == 0)
++			mv_hash_init_ivs(ctx, ipad, opad);
++
++		return rc;
++	}
++}
++
++static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
++			    enum hash_op op, int count_add)
++{
++	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
++	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct crypto_shash *fallback_tfm = NULL;
++	struct crypto_shash *base_hash = NULL;
++	int err = -ENOMEM;
++
++	ctx->op = op;
++	ctx->count_add = count_add;
++
++	/* Allocate a fallback and abort if it failed. */
++	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
++					  CRYPTO_ALG_NEED_FALLBACK);
++	if (IS_ERR(fallback_tfm)) {
++		printk(KERN_WARNING MV_CESA
++		       "Fallback driver '%s' could not be loaded!\n",
++		       fallback_driver_name);
++		err = PTR_ERR(fallback_tfm);
++		goto out;
++	}
++	ctx->fallback = fallback_tfm;
++
++	if (base_hash_name) {
++		/* Allocate a hash to compute the ipad/opad of hmac. */
++		base_hash = crypto_alloc_shash(base_hash_name, 0,
++					       CRYPTO_ALG_NEED_FALLBACK);
++		if (IS_ERR(base_hash)) {
++			printk(KERN_WARNING MV_CESA
++			       "Base driver '%s' could not be loaded!\n",
++			       base_hash_name);
++			err = PTR_ERR(fallback_tfm);
++			goto err_bad_base;
++		}
++	}
++	ctx->base_hash = base_hash;
++
++	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
++				 sizeof(struct mv_req_hash_ctx) +
++				 crypto_shash_descsize(ctx->fallback));
++	return 0;
++err_bad_base:
++	crypto_free_shash(fallback_tfm);
++out:
++	return err;
++}
++
++static void mv_cra_hash_exit(struct crypto_tfm *tfm)
++{
++	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	crypto_free_shash(ctx->fallback);
++	if (ctx->base_hash)
++		crypto_free_shash(ctx->base_hash);
++}
++
++static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
++{
++	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
++}
++
++static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
++{
++	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
++}
++
+ irqreturn_t crypto_int(int irq, void *priv)
+ {
+ 	u32 val;
+@@ -519,6 +953,53 @@ struct crypto_alg mv_aes_alg_cbc = {
+ 	},
+ };
+ 
++struct ahash_alg mv_sha1_alg = {
++	.init = mv_hash_init,
++	.update = mv_hash_update,
++	.final = mv_hash_final,
++	.finup = mv_hash_finup,
++	.digest = mv_hash_digest,
++	.halg = {
++		 .digestsize = SHA1_DIGEST_SIZE,
++		 .base = {
++			  .cra_name = "sha1",
++			  .cra_driver_name = "mv-sha1",
++			  .cra_priority = 300,
++			  .cra_flags =
++			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
++			  .cra_blocksize = SHA1_BLOCK_SIZE,
++			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
++			  .cra_init = mv_cra_hash_sha1_init,
++			  .cra_exit = mv_cra_hash_exit,
++			  .cra_module = THIS_MODULE,
++			  }
++		 }
++};
++
++struct ahash_alg mv_hmac_sha1_alg = {
++	.init = mv_hash_init,
++	.update = mv_hash_update,
++	.final = mv_hash_final,
++	.finup = mv_hash_finup,
++	.digest = mv_hash_digest,
++	.setkey = mv_hash_setkey,
++	.halg = {
++		 .digestsize = SHA1_DIGEST_SIZE,
++		 .base = {
++			  .cra_name = "hmac(sha1)",
++			  .cra_driver_name = "mv-hmac-sha1",
++			  .cra_priority = 300,
++			  .cra_flags =
++			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
++			  .cra_blocksize = SHA1_BLOCK_SIZE,
++			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
++			  .cra_init = mv_cra_hash_hmac_sha1_init,
++			  .cra_exit = mv_cra_hash_exit,
++			  .cra_module = THIS_MODULE,
++			  }
++		 }
++};
++
+ static int mv_probe(struct platform_device *pdev)
+ {
+ 	struct crypto_priv *cp;
+@@ -527,7 +1008,7 @@ static int mv_probe(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	if (cpg) {
+-		printk(KERN_ERR "Second crypto dev?\n");
++		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
+ 		return -EEXIST;
+ 	}
+ 
+@@ -591,6 +1072,21 @@ static int mv_probe(struct platform_device *pdev)
+ 	ret = crypto_register_alg(&mv_aes_alg_cbc);
+ 	if (ret)
+ 		goto err_unreg_ecb;
++
++	ret = crypto_register_ahash(&mv_sha1_alg);
++	if (ret == 0)
++		cpg->has_sha1 = 1;
++	else
++		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
++
++	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
++	if (ret == 0) {
++		cpg->has_hmac_sha1 = 1;
++	} else {
++		printk(KERN_WARNING MV_CESA
++		       "Could not register hmac-sha1 driver\n");
++	}
++
+ 	return 0;
+ err_unreg_ecb:
+ 	crypto_unregister_alg(&mv_aes_alg_ecb);
+@@ -615,6 +1111,10 @@ static int mv_remove(struct platform_device *pdev)
+ 
+ 	crypto_unregister_alg(&mv_aes_alg_ecb);
+ 	crypto_unregister_alg(&mv_aes_alg_cbc);
++	if (cp->has_sha1)
++		crypto_unregister_ahash(&mv_sha1_alg);
++	if (cp->has_hmac_sha1)
++		crypto_unregister_ahash(&mv_hmac_sha1_alg);
+ 	kthread_stop(cp->queue_th);
+ 	free_irq(cp->irq, cp);
+ 	memset(cp->sram, 0, cp->sram_size);
+diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
+index c3e25d3..08fcb11 100644
+--- a/drivers/crypto/mv_cesa.h
++++ b/drivers/crypto/mv_cesa.h
+@@ -1,6 +1,10 @@
+ #ifndef __MV_CRYPTO_H__
+ 
+ #define DIGEST_INITIAL_VAL_A	0xdd00
++#define DIGEST_INITIAL_VAL_B	0xdd04
++#define DIGEST_INITIAL_VAL_C	0xdd08
++#define DIGEST_INITIAL_VAL_D	0xdd0c
++#define DIGEST_INITIAL_VAL_E	0xdd10
+ #define DES_CMD_REG		0xdd58
+ 
+ #define SEC_ACCEL_CMD		0xde00
+@@ -70,6 +74,10 @@ struct sec_accel_config {
+ #define CFG_AES_LEN_128		(0 << 24)
+ #define CFG_AES_LEN_192		(1 << 24)
+ #define CFG_AES_LEN_256		(2 << 24)
++#define CFG_NOT_FRAG		(0 << 30)
++#define CFG_FIRST_FRAG		(1 << 30)
++#define CFG_LAST_FRAG		(2 << 30)
++#define CFG_MID_FRAG		(3 << 30)
+ 
+ 	u32 enc_p;
+ #define ENC_P_SRC(x)		(x)
+@@ -90,7 +98,11 @@ struct sec_accel_config {
+ #define MAC_SRC_TOTAL_LEN(x)	((x) << 16)
+ 
+ 	u32 mac_digest;
++#define MAC_DIGEST_P(x)	(x)
++#define MAC_FRAG_LEN(x)	((x) << 16)
+ 	u32 mac_iv;
++#define MAC_INNER_IV_P(x)	(x)
++#define MAC_OUTER_IV_P(x)	((x) << 16)
+ }__attribute__ ((packed));
+ 	/*
+ 	 * /-----------\ 0
+@@ -101,19 +113,37 @@ struct sec_accel_config {
+ 	 * |  IV   IN  |	4 * 4
+ 	 * |-----------| 0x40 (inplace)
+ 	 * |  IV BUF   |	4 * 4
+-	 * |-----------| 0x50
++	 * |-----------| 0x80
+ 	 * |  DATA IN  |	16 * x (max ->max_req_size)
+-	 * |-----------| 0x50 (inplace operation)
++	 * |-----------| 0x80 (inplace operation)
+ 	 * |  DATA OUT |	16 * x (max ->max_req_size)
+ 	 * \-----------/ SRAM size
+ 	 */
++
++	/* Hashing memory map:
++	 * /-----------\ 0
++	 * | ACCEL CFG |        4 * 8
++	 * |-----------| 0x20
++	 * | Inner IV  |        5 * 4
++	 * |-----------| 0x34
++	 * | Outer IV  |        5 * 4
++	 * |-----------| 0x48
++	 * | Output BUF|        5 * 4
++	 * |-----------| 0x80
++	 * |  DATA IN  |        64 * x (max ->max_req_size)
++	 * \-----------/ SRAM size
++	 */
+ #define SRAM_CONFIG		0x00
+ #define SRAM_DATA_KEY_P		0x20
+ #define SRAM_DATA_IV		0x40
+ #define SRAM_DATA_IV_BUF	0x40
+-#define SRAM_DATA_IN_START	0x50
+-#define SRAM_DATA_OUT_START	0x50
++#define SRAM_DATA_IN_START	0x80
++#define SRAM_DATA_OUT_START	0x80
++
++#define SRAM_HMAC_IV_IN		0x20
++#define SRAM_HMAC_IV_OUT	0x34
++#define SRAM_DIGEST_BUF		0x48
+ 
+-#define SRAM_CFG_SPACE		0x50
++#define SRAM_CFG_SPACE		0x80
+ 
+ #endif
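
A standalone C sketch of the HMAC key preprocessing done in mv_hash_setkey() above: the key is padded to one block and XORed with the inner (0x36) and outer (0x5c) pad bytes; the real driver then hashes each padded block once and programs the resulting states as the engine's inner/outer IVs. The hashing step is omitted here and the key is a made-up example; this is not the driver code.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64   /* SHA1_BLOCK_SIZE */

static void hmac_pads(const unsigned char *key, size_t keylen,
                      unsigned char ipad[BLOCK_SIZE],
                      unsigned char opad[BLOCK_SIZE])
{
    size_t i;

    /* keys longer than a block would first be hashed down to digest size */
    if (keylen > BLOCK_SIZE)
        keylen = BLOCK_SIZE;

    memset(ipad, 0, BLOCK_SIZE);
    memcpy(ipad, key, keylen);
    memcpy(opad, ipad, BLOCK_SIZE);

    for (i = 0; i < BLOCK_SIZE; i++) {
        ipad[i] ^= 0x36;                 /* inner pad */
        opad[i] ^= 0x5c;                 /* outer pad */
    }
}

int main(void)
{
    unsigned char ipad[BLOCK_SIZE], opad[BLOCK_SIZE];

    hmac_pads((const unsigned char *)"secret", 6, ipad, opad);
    printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
    return 0;
}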

Added: dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-src-sglist-more.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/arm/mv_cesa-src-sglist-more.patch	Sun Jun 20 13:31:03 2010	(r15898)
@@ -0,0 +1,104 @@
+From: Uri Simchoni <uri at jdland.co.il>
+Date: Thu, 8 Apr 2010 16:27:02 +0000 (+0300)
+Subject: crypto: mv_cesa - Fix situations where the src sglist spans more data than the reques... 
+X-Git-Tag: v2.6.35-rc1~446^2~35
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=15d4dd3594221f11a7730fcf2d5f9942b96cdd7e
+
+crypto: mv_cesa - Fix situations where the src sglist spans more data than the request asks for
+
+Fix for situations where the source scatterlist spans more data than the
+request nbytes
+
+Signed-off-by: Uri Simchoni <uri at jdland.co.il>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 018a95c..096f9ff 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -143,27 +143,45 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ 	return 0;
+ }
+ 
+-static void setup_data_in(struct ablkcipher_request *req)
++static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
+ {
+ 	int ret;
+-	void *buf;
+-
+-	if (!cpg->p.sg_src_left) {
+-		ret = sg_miter_next(&cpg->p.src_sg_it);
+-		BUG_ON(!ret);
+-		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+-		cpg->p.src_start = 0;
+-	}
++	void *sbuf;
++	int copied = 0;
+ 
+-	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
+-
+-	buf = cpg->p.src_sg_it.addr;
+-	buf += cpg->p.src_start;
++	while (1) {
++		if (!p->sg_src_left) {
++			ret = sg_miter_next(&p->src_sg_it);
++			BUG_ON(!ret);
++			p->sg_src_left = p->src_sg_it.length;
++			p->src_start = 0;
++		}
+ 
+-	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
++		sbuf = p->src_sg_it.addr + p->src_start;
++
++		if (p->sg_src_left <= len - copied) {
++			memcpy(dbuf + copied, sbuf, p->sg_src_left);
++			copied += p->sg_src_left;
++			p->sg_src_left = 0;
++			if (copied >= len)
++				break;
++		} else {
++			int copy_len = len - copied;
++			memcpy(dbuf + copied, sbuf, copy_len);
++			p->src_start += copy_len;
++			p->sg_src_left -= copy_len;
++			break;
++		}
++	}
++}
+ 
+-	cpg->p.sg_src_left -= cpg->p.crypt_len;
+-	cpg->p.src_start += cpg->p.crypt_len;
++static void setup_data_in(struct ablkcipher_request *req)
++{
++	struct req_progress *p = &cpg->p;
++	p->crypt_len =
++	    min((int)req->nbytes - p->total_req_bytes, cpg->max_req_size);
++	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
++			p->crypt_len);
+ }
+ 
+ static void mv_process_current_q(int first_block)
+@@ -289,12 +307,16 @@ static void dequeue_complete_req(void)
+ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+ {
+ 	int i = 0;
+-
+-	do {
+-		total_bytes -= sl[i].length;
+-		i++;
+-
+-	} while (total_bytes > 0);
++	size_t cur_len;
++
++	while (1) {
++		cur_len = sl[i].length;
++		++i;
++		if (total_bytes > cur_len)
++			total_bytes -= cur_len;
++		else
++			break;
++	}
+ 
+ 	return i;
+ }
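
A standalone C sketch of the count_sgs() change in this patch: entry counting stops as soon as the accumulated segment lengths cover the request, so a source list that spans more data than req->nbytes is not walked past the end of the request. Plain arrays stand in for the scatterlist and the segment sizes are made up.

#include <stdio.h>

static int count_segs(const unsigned int *seg_len, unsigned int total_bytes)
{
    int i = 0;

    while (1) {
        unsigned int cur_len = seg_len[i];
        ++i;
        if (total_bytes > cur_len)
            total_bytes -= cur_len;
        else
            break;              /* this entry already covers the rest */
    }
    return i;
}

int main(void)
{
    /* the segments hold 4K in total, but the request only needs 1500 bytes */
    unsigned int segs[] = { 1024, 1024, 1024, 1024 };

    printf("entries needed: %d\n", count_segs(segs, 1500));  /* prints 2 */
    return 0;
}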

Modified: dists/sid/linux-2.6/debian/patches/series/16
==============================================================================
--- dists/sid/linux-2.6/debian/patches/series/16	Sun Jun 20 00:18:16 2010	(r15897)
+++ dists/sid/linux-2.6/debian/patches/series/16	Sun Jun 20 13:31:03 2010	(r15898)
@@ -147,3 +147,13 @@
 + bugfix/all/r8169-fix-random-mdio_write-failures.patch
 + bugfix/all/r8169-fix-mdio_read-and-update-mdio_write.patch
 + features/all/xgifb-driver.patch
++ bugfix/arm/mv_cesa-invoke-softirq-context.patch
++ bugfix/arm/mv_cesa-fix-compiler-warning.patch
++ bugfix/arm/mv_cesa-dest-sglist-diff.patch
++ bugfix/arm/mv_cesa-src-sglist-more.patch
++ bugfix/arm/mv_cesa-generic-async-requests.patch
++ bugfix/arm/mv_cesa-rename-variable.patch
++ bugfix/arm/mv_cesa-exec-code-via-func-pointers.patch
++ bugfix/arm/mv_cesa-make-copy-back-optional.patch
++ bugfix/arm/mv_cesa-process-data-previous-requests.patch
++ bugfix/arm/mv_cesa-sha1-async-drivers.patch


