--- test021.left.txt
+++ test021.right.txt
@@ -1,4 +1,4 @@
-/* $OpenBSD: softraid_crypto.c,v 1.91 2013/03/31 15:44:52 jsing Exp $ */
+/* $OpenBSD: softraid_crypto.c,v 1.139 2020/07/13 00:06:22 kn Exp $ */
 /*
  * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
  * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
@@ -25,7 +25,6 @@
 #include <sys/buf.h>
 #include <sys/device.h>
 #include <sys/ioctl.h>
-#include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/pool.h>
 #include <sys/kernel.h>
@@ -34,6 +33,7 @@
 #include <sys/queue.h>
 #include <sys/fcntl.h>
 #include <sys/disklabel.h>
+#include <sys/vnode.h>
 #include <sys/mount.h>
 #include <sys/sensors.h>
 #include <sys/stat.h>
@@ -42,7 +42,6 @@
 #include <sys/dkio.h>
 
 #include <crypto/cryptodev.h>
-#include <crypto/cryptosoft.h>
 #include <crypto/rijndael.h>
 #include <crypto/md5.h>
 #include <crypto/sha1.h>
@@ -54,7 +53,6 @@
 #include <scsi/scsi_disk.h>
 
 #include <dev/softraidvar.h>
-#include <dev/rndvar.h>
 
 /*
  * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
@@ -62,18 +60,15 @@
  * because we assert that only one ccb per WU will ever be active.
  */
 struct sr_crypto_wu {
-	TAILQ_ENTRY(sr_crypto_wu)	cr_link;
+	struct sr_workunit		cr_wu;		/* Must be first. */
 	struct uio			cr_uio;
 	struct iovec			cr_iov;
 	struct cryptop			*cr_crp;
-	struct cryptodesc		*cr_descs;
-	struct sr_workunit		*cr_wu;
 	void				*cr_dmabuf;
 };
 
 
-struct sr_crypto_wu *sr_crypto_wu_get(struct sr_workunit *, int);
-void		sr_crypto_wu_put(struct sr_crypto_wu *);
+struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
 int		sr_crypto_create_keys(struct sr_discipline *);
 int		sr_crypto_get_kdf(struct bioc_createraid *,
 		    struct sr_discipline *);
@@ -92,12 +87,11 @@
 		    struct bioc_discipline *);
 int		sr_crypto_meta_opt_handler(struct sr_discipline *,
 		    struct sr_meta_opt_hdr *);
-int		sr_crypto_write(struct cryptop *);
+void		sr_crypto_write(struct cryptop *);
 int		sr_crypto_rw(struct sr_workunit *);
-int		sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
+int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
 void		sr_crypto_done(struct sr_workunit *);
-int		sr_crypto_read(struct cryptop *);
-void		sr_crypto_finish_io(struct sr_workunit *);
+void		sr_crypto_read(struct cryptop *);
 void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
 		    u_int8_t *, int, u_char *);
 void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
@@ -113,6 +107,7 @@
 	int i;
 
 	/* Fill out discipline members. */
+	sd->sd_wu_size = sizeof(struct sr_crypto_wu);
 	sd->sd_type = SR_MD_CRYPTO;
 	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
 	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
@@ -143,8 +138,14 @@
 		sr_error(sd->sd_sc, "%s requires exactly one chunk",
 		    sd->sd_name);
 		goto done;
-	}
+	}
 
+	if (coerced_size > SR_CRYPTO_MAXSIZE) {
+		sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
+		    sd->sd_name, coerced_size, SR_CRYPTO_MAXSIZE);
+		goto done;
+	}
+
 	/* Create crypto optional metadata. */
 	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
 	    M_WAITOK | M_ZERO);
@@ -208,7 +209,7 @@
 
 	if (data != NULL) {
 		/* Kernel already has mask key. */
-		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
+		memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
 		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 	} else if (bc->bc_key_disk != NODEV) {
 		/* Read the mask key from the key disk. */
@@ -248,119 +249,71 @@
 }
 
 struct sr_crypto_wu *
-sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
+sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
 {
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_discipline	*sd = wu->swu_dis;
 	struct sr_crypto_wu	*crwu;
 	struct cryptodesc	*crd;
 	int			flags, i, n;
-	daddr64_t		blk = 0;
+	daddr_t			blkno;
 	u_int			keyndx;
 
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
 	    DEVNAME(sd->sd_sc), wu, encrypt);
 
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
-		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
-	if (crwu == NULL)
-		panic("sr_crypto_wu_get: out of wus");
-
+	crwu = (struct sr_crypto_wu *)wu;
 	crwu->cr_uio.uio_iovcnt = 1;
 	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
 	if (xs->flags & SCSI_DATA_OUT) {
 		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
-		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
+		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
 	} else
 		crwu->cr_uio.uio_iov->iov_base = xs->data;
 
-	if (xs->cmdlen == 10)
-		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
-	else if (xs->cmdlen == 16)
-		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
-	else if (xs->cmdlen == 6)
-		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);
-
+	blkno = wu->swu_blk_start;
 	n = xs->datalen >> DEV_BSHIFT;
 
 	/*
 	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
-	 * Since there may be less than that we need to tweak the linked list
+	 * Since there may be less than that we need to tweak the amount
 	 * of crypto desc structures to be just long enough for our needs.
 	 */
-	crd = crwu->cr_descs;
-	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
-		crd = crd->crd_next;
-		KASSERT(crd);
-	}
-	crwu->cr_crp->crp_desc = crd;
+	KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
+	crwu->cr_crp->crp_ndesc = n;
 	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
 	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;
 
-	/* Select crypto session based on block number */
-	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
-	if (keyndx >= SR_CRYPTO_MAXKEYS)
-		goto unwind;
+	/*
+	 * Select crypto session based on block number.
+	 *
+	 * XXX - this does not handle the case where the read/write spans
+	 * across a different key blocks (e.g. 0.5TB boundary). Currently
+	 * this is already broken by the use of scr_key[0] below.
+	 */
+	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
 	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
-	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
-		goto unwind;
 
+	crwu->cr_crp->crp_opaque = crwu;
 	crwu->cr_crp->crp_ilen = xs->datalen;
 	crwu->cr_crp->crp_alloctype = M_DEVBUF;
+	crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
 	crwu->cr_crp->crp_buf = &crwu->cr_uio;
-	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
-	    i++, blk++, crd = crd->crd_next) {
+	for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
+		crd = &crwu->cr_crp->crp_desc[i];
 		crd->crd_skip = i << DEV_BSHIFT;
 		crd->crd_len = DEV_BSIZE;
 		crd->crd_inject = 0;
 		crd->crd_flags = flags;
-		crd->crd_alg = CRYPTO_AES_XTS;
-
-		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
-		case SR_CRYPTOA_AES_XTS_128:
-			crd->crd_klen = 256;
-			break;
-		case SR_CRYPTOA_AES_XTS_256:
-			crd->crd_klen = 512;
-			break;
-		default:
-			goto unwind;
-		}
+		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
+		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
 		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
-		bcopy(&blk, crd->crd_iv, sizeof(blk));
+		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
 	}
-	crwu->cr_wu = wu;
-	crwu->cr_crp->crp_opaque = crwu;
 
 	return (crwu);
-unwind:
-	/* steal the descriptors back from the cryptop */
-	crwu->cr_crp->crp_desc = NULL;
-
-	return (NULL);
 }
 
-void
-sr_crypto_wu_put(struct sr_crypto_wu *crwu)
-{
-	struct cryptop		*crp = crwu->cr_crp;
-	struct sr_workunit	*wu = crwu->cr_wu;
-	struct sr_discipline	*sd = wu->swu_dis;
-
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
-	    DEVNAME(wu->swu_dis->sd_sc), crwu);
-
-	/* steal the descriptors back from the cryptop */
-	crp->crp_desc = NULL;
-
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
-}
-
 int
 sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
 {
@@ -386,9 +339,8 @@
 		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
 		    kdfinfo->genkdf.len)
 			goto out;
-		bcopy(&kdfinfo->genkdf,
-		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
-		    kdfinfo->genkdf.len);
+		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
 	}
 
 	/* copy mask key to run-time meta data */
@@ -396,7 +348,7 @@
 		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
 		    sizeof(kdfinfo->maskkey))
 			goto out;
-		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
+		memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
 		    sizeof(kdfinfo->maskkey));
 	}
 
@@ -404,7 +356,7 @@
 	rv = 0;
 out:
 	explicit_bzero(kdfinfo, bc->bc_opaque_size);
-	free(kdfinfo, M_DEVBUF);
+	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);
 
 	return (rv);
 }
@@ -424,7 +376,7 @@
 		rv = 0;
 		break;
 	default:
-		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
 		    "softraid", alg);
 		rv = -1;
 		goto out;
@@ -450,7 +402,7 @@
 		rv = 0;
 		break;
 	default:
-		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
 		    "softraid", alg);
 		rv = -1;
 		goto out;
@@ -617,6 +569,17 @@
 		goto out;
 	}
 
+	/* Copy new KDF hint to metadata, if supplied. */
+	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
+		if (kdfinfo2->genkdf.len >
+		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
+			goto out;
+		explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
+		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
+	}
+
 	/* Mask the disk keys. */
 	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
 	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
@@ -630,7 +593,7 @@
 	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);
 
 	/* Copy new encrypted key and HMAC to metadata. */
-	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
+	memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
 	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));
 
 	rv = 0; /* Success */
@@ -638,7 +601,7 @@
 out:
 	if (p) {
 		explicit_bzero(p, ksz);
-		free(p, M_DEVBUF);
+		free(p, M_DEVBUF, ksz);
 	}
 
 	explicit_bzero(check_digest, sizeof(check_digest));
@@ -686,7 +649,7 @@
 		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
 		    "open %s\n", DEVNAME(sc), devname);
 		vput(vn);
-		goto fail;
+		goto done;
 	}
 	open = 1; /* close dev on error */
 
@@ -696,19 +659,12 @@
 	    FREAD, NOCRED, curproc)) {
 		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
 		    "failed\n", DEVNAME(sc));
-		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
-		vput(vn);
-		goto fail;
+		goto done;
 	}
-	if (label.d_secsize != DEV_BSIZE) {
-		sr_error(sc, "%s has unsupported sector size (%d)",
-		    devname, label.d_secsize);
-		goto fail;
-	}
 	if (label.d_partitions[part].p_fstype != FS_RAID) {
-		sr_error(sc, "%s partition not of type RAID (%d)\n",
+		sr_error(sc, "%s partition not of type RAID (%d)",
 		    devname, label.d_partitions[part].p_fstype);
-		goto fail;
+		goto done;
 	}
 
 	/*
@@ -728,7 +684,7 @@
 	km->scmi.scm_size = 0;
 	km->scmi.scm_coerced_size = 0;
 	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
-	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
+	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
 	    sizeof(struct sr_uuid));
 
 	sr_checksum(sc, km, &km->scm_checksum,
@@ -745,7 +701,7 @@
 	sm->ssdi.ssd_version = SR_META_VERSION;
 	sm->ssd_ondisk = 0;
 	sm->ssdi.ssd_vol_flags = 0;
-	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
+	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
 	    sizeof(struct sr_uuid));
 	sm->ssdi.ssd_chunk_no = 1;
 	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
@@ -785,7 +741,7 @@
 	omi->omi_som->som_type = SR_OPT_KEYDISK;
 	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
 	skm = (struct sr_meta_keydisk *)omi->omi_som;
-	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
+	memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
 	    sizeof(skm->skm_maskkey));
 	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
 	fakesd->sd_meta->ssdi.ssd_opt_no++;
@@ -799,19 +755,16 @@
 	goto done;
 
 fail:
-	if (key_disk)
-		free(key_disk, M_DEVBUF);
+	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
 	key_disk = NULL;
 
done:
-	if (omi)
-		free(omi, M_DEVBUF);
+	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
 	if (fakesd && fakesd->sd_vol.sv_chunks)
-		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
-	if (fakesd)
-		free(fakesd, M_DEVBUF);
-	if (sm)
-		free(sm, M_DEVBUF);
+		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
+		    sizeof(struct sr_chunk *));
+	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
+	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
 	if (open) {
 		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
 		vput(vn);
@@ -855,7 +808,7 @@
 		sr_error(sc, "cannot open key disk %s", devname);
 		goto done;
 	}
-	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
+	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
 		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
 		    "open %s\n", DEVNAME(sc), devname);
 		vput(vn);
@@ -869,17 +822,10 @@
 	    NOCRED, curproc)) {
 		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
 		    "failed\n", DEVNAME(sc));
-		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
-		vput(vn);
 		goto done;
 	}
-	if (label.d_secsize != DEV_BSIZE) {
-		sr_error(sc, "%s has unsupported sector size (%d)",
-		    devname, label.d_secsize);
-		goto done;
-	}
 	if (label.d_partitions[part].p_fstype != FS_RAID) {
-		sr_error(sc, "%s partition not of type RAID (%d)\n",
+		sr_error(sc, "%s partition not of type RAID (%d)",
 		    devname, label.d_partitions[part].p_fstype);
 		goto done;
 	}
@@ -887,7 +833,7 @@
 	/*
 	 * Read and validate key disk metadata.
 	 */
-	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
+	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
 	if (sr_meta_native_read(sd, dev, sm, NULL)) {
 		sr_error(sc, "native bootprobe could not read native metadata");
 		goto done;
@@ -911,7 +857,7 @@
 	key_disk->src_vn = vn;
 	key_disk->src_size = 0;
 
-	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
+	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
 	    sizeof(key_disk->src_meta));
 
 	/* Read mask key from optional metadata. */
@@ -920,13 +866,12 @@
 		omh = omi->omi_som;
 		if (omh->som_type == SR_OPT_KEYDISK) {
 			skm = (struct sr_meta_keydisk *)omh;
-			bcopy(&skm->skm_maskkey,
-			    sd->mds.mdd_crypto.scr_maskkey,
+			memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 		} else if (omh->som_type == SR_OPT_CRYPTO) {
 			/* Original keydisk format with key in crypto area. */
-			bcopy(omh + sizeof(struct sr_meta_opt_hdr),
-			    sd->mds.mdd_crypto.scr_maskkey,
+			memcpy(sd->mds.mdd_crypto.scr_maskkey,
+			    omh + sizeof(struct sr_meta_opt_hdr),
 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 		}
 	}
@@ -934,15 +879,13 @@
 	open = 0;
 
done:
-	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
+	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
 		omi_next = SLIST_NEXT(omi, omi_link);
-		if (omi->omi_som)
-			free(omi->omi_som, M_DEVBUF);
-		free(omi, M_DEVBUF);
+		free(omi->omi_som, M_DEVBUF, 0);
+		free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
 	}
 
-	if (sm)
-		free(sm, M_DEVBUF);
+	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
 
 	if (vn && open) {
 		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
@@ -952,16 +895,43 @@
 	return key_disk;
 }
 
+static void
+sr_crypto_free_sessions(struct sr_discipline *sd)
+{
+	u_int			i;
+
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
+		if (sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1) {
+			crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
+			sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+		}
+	}
+}
+
 int
 sr_crypto_alloc_resources(struct sr_discipline *sd)
 {
-	struct cryptoini	cri;
+	struct sr_workunit	*wu;
 	struct sr_crypto_wu	*crwu;
+	struct cryptoini	cri;
 	u_int			num_keys, i;
 
 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
 	    DEVNAME(sd->sd_sc));
 
+	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
+	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
+	case SR_CRYPTOA_AES_XTS_128:
+		sd->mds.mdd_crypto.scr_klen = 256;
+		break;
+	case SR_CRYPTOA_AES_XTS_256:
+		sd->mds.mdd_crypto.scr_klen = 512;
+		break;
+	default:
+		sr_error(sd->sd_sc, "unknown crypto algorithm");
+		return (EINVAL);
+	}
+
 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
 
@@ -979,61 +949,34 @@
 	}
 
 	/*
-	 * For each wu allocate the uio, iovec and crypto structures.
-	 * these have to be allocated now because during runtime we can't
-	 * fail an allocation without failing the io (which can cause real
+	 * For each work unit allocate the uio, iovec and crypto structures.
+	 * These have to be allocated now because during runtime we cannot
+	 * fail an allocation without failing the I/O (which can cause real
 	 * problems).
 	 */
-	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
-	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
-	for (i = 0; i < sd->sd_max_wu; i++) {
-		crwu = malloc(sizeof(*crwu), M_DEVBUF,
-		    M_WAITOK | M_ZERO | M_CANFAIL);
-		if (crwu == NULL)
-			return (ENOMEM);
-		/* put it on the list now so if we fail it'll be freed */
-		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
-
+	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
+		crwu = (struct sr_crypto_wu *)wu;
 		crwu->cr_uio.uio_iov = &crwu->cr_iov;
 		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
 		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
 		if (crwu->cr_crp == NULL)
			return (ENOMEM);
-		/* steal the list of cryptodescs */
-		crwu->cr_descs = crwu->cr_crp->crp_desc;
-		crwu->cr_crp->crp_desc = NULL;
 	}
 
-	bzero(&cri, sizeof(cri));
-	cri.cri_alg = CRYPTO_AES_XTS;
-	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
-	case SR_CRYPTOA_AES_XTS_128:
-		cri.cri_klen = 256;
-		break;
-	case SR_CRYPTOA_AES_XTS_256:
-		cri.cri_klen = 512;
-		break;
-	default:
-		return (EINVAL);
-	}
+	memset(&cri, 0, sizeof(cri));
+	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
+	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;
 
-	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
-	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
-	if (num_keys >= SR_CRYPTO_MAXKEYS)
+	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
+	num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
+	    SR_CRYPTO_KEY_BLKSHIFT) + 1;
+	if (num_keys > SR_CRYPTO_MAXKEYS)
 		return (EFBIG);
-	for (i = 0; i <= num_keys; i++) {
+	for (i = 0; i < num_keys; i++) {
 		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
 		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
 		    &cri, 0) != 0) {
-			for (i = 0;
-			    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
-			    i++) {
-				crypto_freesession(
-				    sd->mds.mdd_crypto.scr_sid[i]);
-				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
-			}
+			sr_crypto_free_sessions(sd);
 			return (EINVAL);
 		}
 	}
@@ -1046,39 +989,30 @@
 void
 sr_crypto_free_resources(struct sr_discipline *sd)
 {
+	struct sr_workunit	*wu;
 	struct sr_crypto_wu	*crwu;
-	u_int			i;
 
 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
 	    DEVNAME(sd->sd_sc));
 
 	if (sd->mds.mdd_crypto.key_disk != NULL) {
-		explicit_bzero(sd->mds.mdd_crypto.key_disk, sizeof
-		    sd->mds.mdd_crypto.key_disk);
-		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
+		explicit_bzero(sd->mds.mdd_crypto.key_disk,
+		    sizeof(*sd->mds.mdd_crypto.key_disk));
+		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
+		    sizeof(*sd->mds.mdd_crypto.key_disk));
 	}
 
 	sr_hotplug_unregister(sd, sr_crypto_hotplug);
 
-	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
-		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
-		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
-	}
+	sr_crypto_free_sessions(sd);
 
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
-		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-
-		if (crwu->cr_dmabuf != NULL)
+	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
+		crwu = (struct sr_crypto_wu *)wu;
+		if (crwu->cr_dmabuf)
 			dma_free(crwu->cr_dmabuf, MAXPHYS);
-		if (crwu->cr_crp) {
-			/* twiddle cryptoreq back */
-			crwu->cr_crp->crp_desc = crwu->cr_descs;
+		if (crwu->cr_crp)
 			crypto_freereq(crwu->cr_crp);
-		}
-		free(crwu, M_DEVBUF);
 	}
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
 
 	sr_wu_free(sd);
 	sr_ccb_free(sd);
@@ -1165,65 +1099,60 @@
 sr_crypto_rw(struct sr_workunit *wu)
 {
 	struct sr_crypto_wu	*crwu;
-	int			s, rv = 0;
+	daddr_t			blkno;
+	int			rv = 0;
 
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu);
 
+	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
+		return (1);
+
 	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
-		crwu = sr_crypto_wu_get(wu, 1);
-		if (crwu == NULL)
-			return (1);
+		crwu = sr_crypto_prepare(wu, 1);
 		crwu->cr_crp->crp_callback = sr_crypto_write;
-		s = splvm();
-		if (crypto_invoke(crwu->cr_crp))
-			rv = 1;
-		else
+		rv = crypto_dispatch(crwu->cr_crp);
+		if (rv == 0)
 			rv = crwu->cr_crp->crp_etype;
-		splx(s);
 	} else
-		rv = sr_crypto_rw2(wu, NULL);
+		rv = sr_crypto_dev_rw(wu, NULL);
 
 	return (rv);
 }
 
-int
+void
 sr_crypto_write(struct cryptop *crp)
 {
 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
-	struct sr_workunit	*wu = crwu->cr_wu;
+	struct sr_workunit	*wu = &crwu->cr_wu;
 	int			s;
 
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
 
 	if (crp->crp_etype) {
 		/* fail io */
 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
 		s = splbio();
-		sr_crypto_finish_io(wu);
+		sr_scsi_done(wu->swu_dis, wu->swu_xs);
 		splx(s);
 	}
 
-	return (sr_crypto_rw2(wu, crwu));
+	sr_crypto_dev_rw(wu, crwu);
 }
 
 int
-sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
+sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
 {
 	struct sr_discipline	*sd = wu->swu_dis;
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_ccb		*ccb;
 	struct uio		*uio;
-	int			s;
-	daddr64_t		blk;
+	daddr_t			blkno;
 
-	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
-		goto bad;
+	blkno = wu->swu_blk_start;
 
-	blk += sd->sd_meta->ssd_data_offset;
-
-	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
+	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
 	if (!ccb) {
 		/* should never happen but handle more gracefully */
 		printf("%s: %s: too many ccbs queued\n",
@@ -1236,17 +1165,10 @@
 	ccb->ccb_opaque = crwu;
 
 	sr_wu_enqueue_ccb(wu, ccb);
+	sr_schedule_wu(wu);
 
-	s = splbio();
-
-	if (sr_check_io_collision(wu))
-		goto queued;
-
-	sr_raid_startwu(wu);
-
-queued:
-	splx(s);
 	return (0);
 
bad:
 	/* wu is unwound by sr_wu_put */
 	if (crwu)
@@ -1259,77 +1181,39 @@
 {
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_crypto_wu	*crwu;
-	struct sr_ccb		*ccb;
 	int			s;
 
 	/* If this was a successful read, initiate decryption of the data. */
 	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
-		/* only fails on implementation error */
-		crwu = sr_crypto_wu_get(wu, 0);
-		if (crwu == NULL)
-			panic("sr_crypto_intr: no wu");
+		crwu = sr_crypto_prepare(wu, 0);
 		crwu->cr_crp->crp_callback = sr_crypto_read;
-		ccb = TAILQ_FIRST(&wu->swu_ccb);
-		if (ccb == NULL)
-			panic("sr_crypto_done: no ccbs on workunit");
-		ccb->ccb_opaque = crwu;
-		DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke %p\n",
+		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
 		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
-		s = splvm();
-		crypto_invoke(crwu->cr_crp);
-		splx(s);
+		crypto_dispatch(crwu->cr_crp);
 		return;
 	}
 
 	s = splbio();
-	sr_crypto_finish_io(wu);
+	sr_scsi_done(wu->swu_dis, wu->swu_xs);
 	splx(s);
 }
 
 void
-sr_crypto_finish_io(struct sr_workunit *wu)
-{
-	struct sr_discipline	*sd = wu->swu_dis;
-	struct scsi_xfer	*xs = wu->swu_xs;
-	struct sr_ccb		*ccb;
-#ifdef SR_DEBUG
-	struct sr_softc		*sc = sd->sd_sc;
-#endif /* SR_DEBUG */
-
-	splassert(IPL_BIO);
-
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
-	    DEVNAME(sc), wu, xs);
-
-	if (wu->swu_cb_active == 1)
-		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
-	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
-		if (ccb->ccb_opaque == NULL)
-			continue;
-		sr_crypto_wu_put(ccb->ccb_opaque);
-	}
-
-	sr_scsi_done(sd, xs);
-}
-
-int
 sr_crypto_read(struct cryptop *crp)
 {
 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
-	struct sr_workunit	*wu = crwu->cr_wu;
+	struct sr_workunit	*wu = &crwu->cr_wu;
 	int			s;
 
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
 
 	if (crp->crp_etype)
 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
 
 	s = splbio();
-	sr_crypto_finish_io(wu);
+	sr_scsi_done(wu->swu_dis, wu->swu_xs);
 	splx(s);
-
-	return (0);
 }
 
 void