1 c285a1f8 2020-11-06 stsp /* $OpenBSD: softraid_crypto.c,v 1.91 2013/03/31 15:44:52 jsing Exp $ */
3 c285a1f8 2020-11-06 stsp * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
4 c285a1f8 2020-11-06 stsp * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
5 c285a1f8 2020-11-06 stsp * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
6 c285a1f8 2020-11-06 stsp * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
8 c285a1f8 2020-11-06 stsp * Permission to use, copy, modify, and distribute this software for any
9 c285a1f8 2020-11-06 stsp * purpose with or without fee is hereby granted, provided that the above
10 c285a1f8 2020-11-06 stsp * copyright notice and this permission notice appear in all copies.
12 c285a1f8 2020-11-06 stsp * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 c285a1f8 2020-11-06 stsp * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 c285a1f8 2020-11-06 stsp * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 c285a1f8 2020-11-06 stsp * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 c285a1f8 2020-11-06 stsp * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 c285a1f8 2020-11-06 stsp * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 c285a1f8 2020-11-06 stsp * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 c285a1f8 2020-11-06 stsp #include "bio.h"
23 c285a1f8 2020-11-06 stsp #include <sys/param.h>
24 c285a1f8 2020-11-06 stsp #include <sys/systm.h>
25 c285a1f8 2020-11-06 stsp #include <sys/buf.h>
26 c285a1f8 2020-11-06 stsp #include <sys/device.h>
27 c285a1f8 2020-11-06 stsp #include <sys/ioctl.h>
28 c285a1f8 2020-11-06 stsp #include <sys/proc.h>
29 c285a1f8 2020-11-06 stsp #include <sys/malloc.h>
30 c285a1f8 2020-11-06 stsp #include <sys/pool.h>
31 c285a1f8 2020-11-06 stsp #include <sys/kernel.h>
32 c285a1f8 2020-11-06 stsp #include <sys/disk.h>
33 c285a1f8 2020-11-06 stsp #include <sys/rwlock.h>
34 c285a1f8 2020-11-06 stsp #include <sys/queue.h>
35 c285a1f8 2020-11-06 stsp #include <sys/fcntl.h>
36 c285a1f8 2020-11-06 stsp #include <sys/disklabel.h>
37 c285a1f8 2020-11-06 stsp #include <sys/mount.h>
38 c285a1f8 2020-11-06 stsp #include <sys/sensors.h>
39 c285a1f8 2020-11-06 stsp #include <sys/stat.h>
40 c285a1f8 2020-11-06 stsp #include <sys/conf.h>
41 c285a1f8 2020-11-06 stsp #include <sys/uio.h>
42 c285a1f8 2020-11-06 stsp #include <sys/dkio.h>
44 c285a1f8 2020-11-06 stsp #include <crypto/cryptodev.h>
45 c285a1f8 2020-11-06 stsp #include <crypto/cryptosoft.h>
46 c285a1f8 2020-11-06 stsp #include <crypto/rijndael.h>
47 c285a1f8 2020-11-06 stsp #include <crypto/md5.h>
48 c285a1f8 2020-11-06 stsp #include <crypto/sha1.h>
49 c285a1f8 2020-11-06 stsp #include <crypto/sha2.h>
50 c285a1f8 2020-11-06 stsp #include <crypto/hmac.h>
52 c285a1f8 2020-11-06 stsp #include <scsi/scsi_all.h>
53 c285a1f8 2020-11-06 stsp #include <scsi/scsiconf.h>
54 c285a1f8 2020-11-06 stsp #include <scsi/scsi_disk.h>
56 c285a1f8 2020-11-06 stsp #include <dev/softraidvar.h>
57 c285a1f8 2020-11-06 stsp #include <dev/rndvar.h>
/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active.
 */
struct sr_crypto_wu {
	TAILQ_ENTRY(sr_crypto_wu) cr_link;	/* linkage on scr_wus free list */
	struct uio cr_uio;		/* uio handed to the crypto framework */
	struct iovec cr_iov;		/* single iovec backing cr_uio */
	struct cryptop *cr_crp;		/* preallocated crypto request */
	struct cryptodesc *cr_descs;	/* preallocated descriptor chain (MAXPHYS worth) */
	struct sr_workunit *cr_wu;	/* owning softraid work unit */
	void *cr_dmabuf;		/* bounce buffer used for writes */
/* Internal function prototypes for the CRYPTO discipline. */
struct sr_crypto_wu *sr_crypto_wu_get(struct sr_workunit *, int);
void sr_crypto_wu_put(struct sr_crypto_wu *);
int sr_crypto_create_keys(struct sr_discipline *);
int sr_crypto_get_kdf(struct bioc_createraid *,
    struct sr_discipline *);
int sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
int sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
int sr_crypto_decrypt_key(struct sr_discipline *);
int sr_crypto_change_maskkey(struct sr_discipline *,
    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
int sr_crypto_create(struct sr_discipline *,
    struct bioc_createraid *, int, int64_t);
int sr_crypto_assemble(struct sr_discipline *,
    struct bioc_createraid *, int, void *);
int sr_crypto_alloc_resources(struct sr_discipline *);
void sr_crypto_free_resources(struct sr_discipline *);
int sr_crypto_ioctl(struct sr_discipline *,
    struct bioc_discipline *);
int sr_crypto_meta_opt_handler(struct sr_discipline *,
    struct sr_meta_opt_hdr *);
int sr_crypto_write(struct cryptop *);
int sr_crypto_rw(struct sr_workunit *);
int sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
void sr_crypto_done(struct sr_workunit *);
int sr_crypto_read(struct cryptop *);
void sr_crypto_finish_io(struct sr_workunit *);
void sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
    u_int8_t *, int, u_char *);
void sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);

#ifdef SR_DEBUG0
void sr_crypto_dumpkeys(struct sr_discipline *);
/* Discipline initialisation. */
sr_crypto_discipline_init(struct sr_discipline *sd)

	/* Fill out discipline members. */
	sd->sd_type = SR_MD_CRYPTO;
	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	/*
	 * Invalidate all crypto session IDs; (u_int64_t)-1 is treated as
	 * "no session" by sr_crypto_wu_get().
	 */
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline specific function pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_create = sr_crypto_create;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_rw = sr_crypto_rw;
	sd->sd_scsi_done = sr_crypto_done;
/*
 * Create a new CRYPTO volume: allocate the crypto optional metadata,
 * obtain keying material (from userland KDF info or a new key disk) and
 * generate the masked disk keys. Returns 0 on success, EAGAIN when a
 * passphrase must still be supplied by userland, EINVAL on failure.
 */
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)

	struct sr_meta_opt_item *omi;
	int rv = EINVAL;

	/* CRYPTO is layered on exactly one underlying chunk. */
	if (no_chunk != 1) {
		sr_error(sd->sd_sc, "%s requires exactly one chunk",
		    sd->sd_name);

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_CRYPTO;
	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
	sd->sd_meta->ssdi.ssd_opt_no++;

	sd->mds.mdd_crypto.key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd))
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
		/* Key-disk volumes may be assembled automatically. */
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;

	} else if (sr_crypto_get_kdf(bc, sd))

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)

	sd->sd_meta->ssdi.ssd_size = coerced_size;

	/* Generate the disk keys and mask them into the metadata. */
	sr_crypto_create_keys(sd);

	sd->sd_max_ccb_per_wu = no_chunk;

	return (rv);
/*
 * Assemble an existing CRYPTO volume. The mask key comes from, in order
 * of preference: kernel-supplied data, a key disk, or userland via the
 * bioc opaque KDF interface (BIOC_SOOUT hands the KDF hint out first,
 * then BIOC_SOIN brings the derived mask key back in).
 */
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)

	int rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)

	if (data != NULL) {
		/* Kernel already has mask key. */
		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)

		/* Never copy out more than the stored KDF hint. */
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;

	} else if (bc->bc_opaque_flags & BIOC_SOIN) {

		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	return (rv);
/*
 * Take a preallocated crypto work unit for this I/O and set up its
 * cryptop: one AES-XTS descriptor per DEV_BSIZE sector, with the IV set
 * from the sector's block number. Returns NULL when no valid crypto
 * session exists for the requested block range.
 */
struct sr_crypto_wu *
sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)

	struct scsi_xfer *xs = wu->swu_xs;
	struct sr_discipline *sd = wu->swu_dis;
	struct sr_crypto_wu *crwu;
	struct cryptodesc *crd;
	int flags, i, n;
	daddr64_t blk = 0;
	u_int keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	/* Take a work unit off the preallocated free list. */
	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
	if (crwu == NULL)
		panic("sr_crypto_wu_get: out of wus");

	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		/* Bounce writes through cr_dmabuf so the caller's buffer
		 * is not encrypted in place. */
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
		crwu->cr_uio.uio_iov->iov_base = xs->data;

	/* Extract the starting LBA from the 10-, 16- or 6-byte CDB. */
	if (xs->cmdlen == 10)
		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
	else if (xs->cmdlen == 16)
		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
	else if (xs->cmdlen == 6)
		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);

	/* Number of DEV_BSIZE sectors in this transfer. */
	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be less than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 */
	crd = crwu->cr_descs;
	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
		crd = crd->crd_next;
		KASSERT(crd);
	crwu->cr_crp->crp_desc = crd;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/* Select crypto session based on block number */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	if (keyndx >= SR_CRYPTO_MAXKEYS)
		goto unwind;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
		goto unwind;

	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
	/* One descriptor per sector; blk advances with each descriptor. */
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = CRYPTO_AES_XTS;

		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
		case SR_CRYPTOA_AES_XTS_128:
			crd->crd_klen = 256;	/* XTS double-length key */
		case SR_CRYPTOA_AES_XTS_256:
			crd->crd_klen = 512;	/* XTS double-length key */
			goto unwind;
		/*
		 * NOTE(review): scr_key[0] is used for every descriptor even
		 * though the session was selected via keyndx -- confirm the
		 * per-region sessions carry the key material and crd_key is
		 * only a fallback.
		 */
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		/* IV is the 512-byte block number of this sector. */
		bcopy(&blk, crd->crd_iv, sizeof(blk));

	crwu->cr_wu = wu;
	crwu->cr_crp->crp_opaque = crwu;

	return (crwu);

	/* steal the descriptors back from the cryptop */
	crwu->cr_crp->crp_desc = NULL;

	return (NULL);
/*
 * Return a crypto work unit to the discipline's free list so the
 * preallocated descriptors and bounce buffer can be reused.
 */
sr_crypto_wu_put(struct sr_crypto_wu *crwu)

	struct cryptop *crp = crwu->cr_crp;
	struct sr_workunit *wu = crwu->cr_wu;
	struct sr_discipline *sd = wu->swu_dis;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), crwu);

	/* steal the descriptors back from the cryptop */
	crp->crp_desc = NULL;

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
/*
 * Copy KDF information in from userland via bc->bc_opaque: the generic
 * KDF hint is stored into the on-disk metadata and/or the mask key into
 * the run-time metadata. Returns 0 on success, EINVAL on failure. The
 * temporary kernel copy of the kdfinfo is always zeroed before free.
 */
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)

	int rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	/* Userland must hand in a correctly sized kdfinfo structure. */
	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))

	/* Internal length must agree with what was copied in. */
	if (kdfinfo->len != bc->bc_opaque_size)

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    kdfinfo->genkdf.len);

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));

	bc->bc_opaque_status = BIOC_SOINOUT_OK;

	/* Scrub the userland key material before freeing. */
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
/*
 * Mask (encrypt) key material: p (plaintext) -> c (ciphertext), size
 * bytes in RIJNDAEL128_BLOCK_LEN steps, using AES-256-ECB keyed with
 * 'key'. Only used on key material, never on disk data. The expanded
 * key schedule is always scrubbed before returning.
 * NOTE(review): rv defaults to 1; callers such as
 * sr_crypto_change_maskkey() compare the result against -1 -- confirm
 * the failure value set on the (not visible here) error paths.
 */
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)

	rijndael_ctx ctx;
	int i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
		/* ECB: one independent 16-byte block at a time. */
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);

	/* Scrub the expanded key schedule from the stack. */
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
/*
 * Unmask (decrypt) key material: c (ciphertext) -> p (plaintext), size
 * bytes in RIJNDAEL128_BLOCK_LEN steps, using AES-256-ECB keyed with
 * 'key'. Counterpart to sr_crypto_encrypt(); the expanded key schedule
 * is always scrubbed before returning.
 * NOTE(review): rv defaults to 1; callers compare against -1 -- confirm
 * the failure value set on the (not visible here) error paths.
 */
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)

	rijndael_ctx ctx;
	int i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
		/* ECB: one independent 16-byte block at a time. */
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);

	/* Scrub the expanded key schedule from the stack. */
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
/*
 * Compute check_digest = HMAC-SHA1(SHA1(maskkey), key). This is the
 * check code stored in the metadata and later used to verify that the
 * disk keys were unmasked with the correct mask key.
 */
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)

	u_char check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX hmacctx;
	SHA1_CTX shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	/* Scrub all key-derived state from the stack. */
	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
/*
 * Unmask the disk keys using scr_maskkey and verify the result against
 * the stored HMAC-SHA1 check code. Returns 0 on success, non-zero on
 * failure. The mask key is erased once the disk keys are recovered.
 */
sr_crypto_decrypt_key(struct sr_discipline *sd)

	u_char check_digest[SHA1_DIGEST_LENGTH];
	int rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	/* Only the HMAC-SHA1 check algorithm is supported. */
	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)

	/* Unmask scm_key (on-disk) into scr_key (run-time). */
	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		/* Wrong mask key: scrub the bogus plaintext keys. */
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));

	rv = 0; /* Success */

	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));
/*
 * Generate fresh random disk keys, mask them into the on-disk metadata
 * with AES-256-ECB under scr_maskkey, and store the HMAC-SHA1 check
 * code used to verify later unmasking. The plaintext keys are erased
 * before returning. Returns 0 on success, 1 on failure.
 */
sr_crypto_create_keys(struct sr_discipline *sd)

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	/* The mask key must fit the AES key schedule. */
	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);

	/* Mark the metadata as carrying a key and a KDF hint. */
	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
/*
 * Re-mask the disk keys under a new mask key: unmask with
 * kdfinfo1->maskkey, verify via the stored HMAC-SHA1 check code, then
 * mask again with kdfinfo2->maskkey and store the new check code.
 * Returns 0 on success, EPERM on a wrong key/passphrase, non-zero on
 * other failure. Both supplied mask keys are erased before returning.
 */
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)

	u_char check_digest[SHA1_DIGEST_LENGTH];
	u_char *c, *p = NULL;
	size_t ksz;
	int rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	/* Only the HMAC-SHA1 check algorithm is supported. */
	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)

	/* Unmask the current on-disk keys into a temporary buffer. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);

	/* Verify the old mask key before committing to the change. */
	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

	/* Scrub and release the temporary plaintext key buffer. */
	explicit_bzero(p, ksz);
	free(p, M_DEVBUF);

	/* Erase all transient key material. */
	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
651 c285a1f8 2020-11-06 stsp struct sr_chunk *
652 c285a1f8 2020-11-06 stsp sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
654 c285a1f8 2020-11-06 stsp struct sr_softc *sc = sd->sd_sc;
655 c285a1f8 2020-11-06 stsp struct sr_discipline *fakesd = NULL;
656 c285a1f8 2020-11-06 stsp struct sr_metadata *sm = NULL;
657 c285a1f8 2020-11-06 stsp struct sr_meta_chunk *km;
658 c285a1f8 2020-11-06 stsp struct sr_meta_opt_item *omi = NULL;
659 c285a1f8 2020-11-06 stsp struct sr_meta_keydisk *skm;
660 c285a1f8 2020-11-06 stsp struct sr_chunk *key_disk = NULL;
661 c285a1f8 2020-11-06 stsp struct disklabel label;
662 c285a1f8 2020-11-06 stsp struct vnode *vn;
663 c285a1f8 2020-11-06 stsp char devname[32];
664 c285a1f8 2020-11-06 stsp int c, part, open = 0;
667 c285a1f8 2020-11-06 stsp * Create a metadata structure on the key disk and store
668 c285a1f8 2020-11-06 stsp * keying material in the optional metadata.
671 c285a1f8 2020-11-06 stsp sr_meta_getdevname(sc, dev, devname, sizeof(devname));
673 c285a1f8 2020-11-06 stsp /* Make sure chunk is not already in use. */
674 c285a1f8 2020-11-06 stsp c = sr_chunk_in_use(sc, dev);
675 c285a1f8 2020-11-06 stsp if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
676 c285a1f8 2020-11-06 stsp sr_error(sc, "%s is already in use", devname);
680 c285a1f8 2020-11-06 stsp /* Open device. */
681 c285a1f8 2020-11-06 stsp if (bdevvp(dev, &vn)) {
682 c285a1f8 2020-11-06 stsp sr_error(sc, "cannot open key disk %s", devname);
685 c285a1f8 2020-11-06 stsp if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
686 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
687 c285a1f8 2020-11-06 stsp "open %s\n", DEVNAME(sc), devname);
691 c285a1f8 2020-11-06 stsp open = 1; /* close dev on error */
693 c285a1f8 2020-11-06 stsp /* Get partition details. */
694 c285a1f8 2020-11-06 stsp part = DISKPART(dev);
695 c285a1f8 2020-11-06 stsp if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
696 c285a1f8 2020-11-06 stsp FREAD, NOCRED, curproc)) {
697 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
698 c285a1f8 2020-11-06 stsp "failed\n", DEVNAME(sc));
699 c285a1f8 2020-11-06 stsp VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
703 c285a1f8 2020-11-06 stsp if (label.d_secsize != DEV_BSIZE) {
704 c285a1f8 2020-11-06 stsp sr_error(sc, "%s has unsupported sector size (%d)",
705 c285a1f8 2020-11-06 stsp devname, label.d_secsize);
708 c285a1f8 2020-11-06 stsp if (label.d_partitions[part].p_fstype != FS_RAID) {
709 c285a1f8 2020-11-06 stsp sr_error(sc, "%s partition not of type RAID (%d)\n",
710 c285a1f8 2020-11-06 stsp devname, label.d_partitions[part].p_fstype);
715 c285a1f8 2020-11-06 stsp * Create and populate chunk metadata.
718 c285a1f8 2020-11-06 stsp key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
719 c285a1f8 2020-11-06 stsp km = &key_disk->src_meta;
721 c285a1f8 2020-11-06 stsp key_disk->src_dev_mm = dev;
722 c285a1f8 2020-11-06 stsp key_disk->src_vn = vn;
723 c285a1f8 2020-11-06 stsp strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
724 c285a1f8 2020-11-06 stsp key_disk->src_size = 0;
726 c285a1f8 2020-11-06 stsp km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
727 c285a1f8 2020-11-06 stsp km->scmi.scm_chunk_id = 0;
728 c285a1f8 2020-11-06 stsp km->scmi.scm_size = 0;
729 c285a1f8 2020-11-06 stsp km->scmi.scm_coerced_size = 0;
730 c285a1f8 2020-11-06 stsp strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
731 c285a1f8 2020-11-06 stsp bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
732 c285a1f8 2020-11-06 stsp sizeof(struct sr_uuid));
734 c285a1f8 2020-11-06 stsp sr_checksum(sc, km, &km->scm_checksum,
735 c285a1f8 2020-11-06 stsp sizeof(struct sr_meta_chunk_invariant));
737 c285a1f8 2020-11-06 stsp km->scm_status = BIOC_SDONLINE;
740 c285a1f8 2020-11-06 stsp * Create and populate our own discipline and metadata.
743 c285a1f8 2020-11-06 stsp sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
744 c285a1f8 2020-11-06 stsp sm->ssdi.ssd_magic = SR_MAGIC;
745 c285a1f8 2020-11-06 stsp sm->ssdi.ssd_version = SR_META_VERSION;
746 c285a1f8 2020-11-06 stsp sm->ssd_ondisk = 0;
747 c285a1f8 2020-11-06 stsp sm->ssdi.ssd_vol_flags = 0;
748 c285a1f8 2020-11-06 stsp bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
749 c285a1f8 2020-11-06 stsp sizeof(struct sr_uuid));
750 c285a1f8 2020-11-06 stsp sm->ssdi.ssd_chunk_no = 1;
751 c285a1f8 2020-11-06 stsp sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
752 c285a1f8 2020-11-06 stsp sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
753 c285a1f8 2020-11-06 stsp sm->ssdi.ssd_size = 0;
754 c285a1f8 2020-11-06 stsp strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
755 c285a1f8 2020-11-06 stsp snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
756 c285a1f8 2020-11-06 stsp "SR %s", "KEYDISK");
757 c285a1f8 2020-11-06 stsp snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
758 c285a1f8 2020-11-06 stsp "%03d", SR_META_VERSION);
760 c285a1f8 2020-11-06 stsp fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
761 c285a1f8 2020-11-06 stsp M_WAITOK | M_ZERO);
762 c285a1f8 2020-11-06 stsp fakesd->sd_sc = sd->sd_sc;
763 c285a1f8 2020-11-06 stsp fakesd->sd_meta = sm;
764 c285a1f8 2020-11-06 stsp fakesd->sd_meta_type = SR_META_F_NATIVE;
765 c285a1f8 2020-11-06 stsp fakesd->sd_vol_status = BIOC_SVONLINE;
766 c285a1f8 2020-11-06 stsp strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
767 c285a1f8 2020-11-06 stsp SLIST_INIT(&fakesd->sd_meta_opt);
769 c285a1f8 2020-11-06 stsp /* Add chunk to volume. */
770 c285a1f8 2020-11-06 stsp fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
771 c285a1f8 2020-11-06 stsp M_WAITOK | M_ZERO);
772 c285a1f8 2020-11-06 stsp fakesd->sd_vol.sv_chunks[0] = key_disk;
773 c285a1f8 2020-11-06 stsp SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
774 c285a1f8 2020-11-06 stsp SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);
776 c285a1f8 2020-11-06 stsp /* Generate mask key. */
777 c285a1f8 2020-11-06 stsp arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
778 c285a1f8 2020-11-06 stsp sizeof(sd->mds.mdd_crypto.scr_maskkey));
780 c285a1f8 2020-11-06 stsp /* Copy mask key to optional metadata area. */
781 c285a1f8 2020-11-06 stsp omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
782 c285a1f8 2020-11-06 stsp M_WAITOK | M_ZERO);
783 c285a1f8 2020-11-06 stsp omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
784 c285a1f8 2020-11-06 stsp M_WAITOK | M_ZERO);
785 c285a1f8 2020-11-06 stsp omi->omi_som->som_type = SR_OPT_KEYDISK;
786 c285a1f8 2020-11-06 stsp omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
787 c285a1f8 2020-11-06 stsp skm = (struct sr_meta_keydisk *)omi->omi_som;
788 c285a1f8 2020-11-06 stsp bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
789 c285a1f8 2020-11-06 stsp sizeof(skm->skm_maskkey));
790 c285a1f8 2020-11-06 stsp SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
791 c285a1f8 2020-11-06 stsp fakesd->sd_meta->ssdi.ssd_opt_no++;
793 c285a1f8 2020-11-06 stsp /* Save metadata. */
794 c285a1f8 2020-11-06 stsp if (sr_meta_save(fakesd, SR_META_DIRTY)) {
795 c285a1f8 2020-11-06 stsp sr_error(sc, "could not save metadata to %s", devname);
802 c285a1f8 2020-11-06 stsp if (key_disk)
803 c285a1f8 2020-11-06 stsp free(key_disk, M_DEVBUF);
804 c285a1f8 2020-11-06 stsp key_disk = NULL;
808 c285a1f8 2020-11-06 stsp free(omi, M_DEVBUF);
809 c285a1f8 2020-11-06 stsp if (fakesd && fakesd->sd_vol.sv_chunks)
810 c285a1f8 2020-11-06 stsp free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
811 c285a1f8 2020-11-06 stsp if (fakesd)
812 c285a1f8 2020-11-06 stsp free(fakesd, M_DEVBUF);
814 c285a1f8 2020-11-06 stsp free(sm, M_DEVBUF);
815 c285a1f8 2020-11-06 stsp if (open) {
816 c285a1f8 2020-11-06 stsp VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
820 c285a1f8 2020-11-06 stsp return key_disk;
823 c285a1f8 2020-11-06 stsp struct sr_chunk *
824 c285a1f8 2020-11-06 stsp sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
826 c285a1f8 2020-11-06 stsp struct sr_softc *sc = sd->sd_sc;
827 c285a1f8 2020-11-06 stsp struct sr_metadata *sm = NULL;
828 c285a1f8 2020-11-06 stsp struct sr_meta_opt_item *omi, *omi_next;
829 c285a1f8 2020-11-06 stsp struct sr_meta_opt_hdr *omh;
830 c285a1f8 2020-11-06 stsp struct sr_meta_keydisk *skm;
831 c285a1f8 2020-11-06 stsp struct sr_meta_opt_head som;
832 c285a1f8 2020-11-06 stsp struct sr_chunk *key_disk = NULL;
833 c285a1f8 2020-11-06 stsp struct disklabel label;
834 c285a1f8 2020-11-06 stsp struct vnode *vn = NULL;
835 c285a1f8 2020-11-06 stsp char devname[32];
836 c285a1f8 2020-11-06 stsp int c, part, open = 0;
839 c285a1f8 2020-11-06 stsp * Load a key disk and load keying material into memory.
842 c285a1f8 2020-11-06 stsp SLIST_INIT(&som);
844 c285a1f8 2020-11-06 stsp sr_meta_getdevname(sc, dev, devname, sizeof(devname));
846 c285a1f8 2020-11-06 stsp /* Make sure chunk is not already in use. */
847 c285a1f8 2020-11-06 stsp c = sr_chunk_in_use(sc, dev);
848 c285a1f8 2020-11-06 stsp if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
849 c285a1f8 2020-11-06 stsp sr_error(sc, "%s is already in use", devname);
853 c285a1f8 2020-11-06 stsp /* Open device. */
854 c285a1f8 2020-11-06 stsp if (bdevvp(dev, &vn)) {
855 c285a1f8 2020-11-06 stsp sr_error(sc, "cannot open key disk %s", devname);
858 c285a1f8 2020-11-06 stsp if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
859 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
860 c285a1f8 2020-11-06 stsp "open %s\n", DEVNAME(sc), devname);
864 c285a1f8 2020-11-06 stsp open = 1; /* close dev on error */
866 c285a1f8 2020-11-06 stsp /* Get partition details. */
867 c285a1f8 2020-11-06 stsp part = DISKPART(dev);
868 c285a1f8 2020-11-06 stsp if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
869 c285a1f8 2020-11-06 stsp NOCRED, curproc)) {
870 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
871 c285a1f8 2020-11-06 stsp "failed\n", DEVNAME(sc));
872 c285a1f8 2020-11-06 stsp VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
876 c285a1f8 2020-11-06 stsp if (label.d_secsize != DEV_BSIZE) {
877 c285a1f8 2020-11-06 stsp sr_error(sc, "%s has unsupported sector size (%d)",
878 c285a1f8 2020-11-06 stsp devname, label.d_secsize);
881 c285a1f8 2020-11-06 stsp if (label.d_partitions[part].p_fstype != FS_RAID) {
882 c285a1f8 2020-11-06 stsp sr_error(sc, "%s partition not of type RAID (%d)\n",
883 c285a1f8 2020-11-06 stsp devname, label.d_partitions[part].p_fstype);
888 c285a1f8 2020-11-06 stsp * Read and validate key disk metadata.
890 c285a1f8 2020-11-06 stsp sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
891 c285a1f8 2020-11-06 stsp if (sr_meta_native_read(sd, dev, sm, NULL)) {
892 c285a1f8 2020-11-06 stsp sr_error(sc, "native bootprobe could not read native metadata");
896 c285a1f8 2020-11-06 stsp if (sr_meta_validate(sd, dev, sm, NULL)) {
897 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_META, "%s: invalid metadata\n",
898 c285a1f8 2020-11-06 stsp DEVNAME(sc));
902 c285a1f8 2020-11-06 stsp /* Make sure this is a key disk. */
903 c285a1f8 2020-11-06 stsp if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
904 c285a1f8 2020-11-06 stsp sr_error(sc, "%s is not a key disk", devname);
908 c285a1f8 2020-11-06 stsp /* Construct key disk chunk. */
909 c285a1f8 2020-11-06 stsp key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
910 c285a1f8 2020-11-06 stsp key_disk->src_dev_mm = dev;
911 c285a1f8 2020-11-06 stsp key_disk->src_vn = vn;
912 c285a1f8 2020-11-06 stsp key_disk->src_size = 0;
914 c285a1f8 2020-11-06 stsp bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
915 c285a1f8 2020-11-06 stsp sizeof(key_disk->src_meta));
917 c285a1f8 2020-11-06 stsp /* Read mask key from optional metadata. */
918 c285a1f8 2020-11-06 stsp sr_meta_opt_load(sc, sm, &som);
919 c285a1f8 2020-11-06 stsp SLIST_FOREACH(omi, &som, omi_link) {
920 c285a1f8 2020-11-06 stsp omh = omi->omi_som;
921 c285a1f8 2020-11-06 stsp if (omh->som_type == SR_OPT_KEYDISK) {
922 c285a1f8 2020-11-06 stsp skm = (struct sr_meta_keydisk *)omh;
923 c285a1f8 2020-11-06 stsp bcopy(&skm->skm_maskkey,
924 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.scr_maskkey,
925 c285a1f8 2020-11-06 stsp sizeof(sd->mds.mdd_crypto.scr_maskkey));
926 c285a1f8 2020-11-06 stsp } else if (omh->som_type == SR_OPT_CRYPTO) {
927 c285a1f8 2020-11-06 stsp /* Original keydisk format with key in crypto area. */
928 c285a1f8 2020-11-06 stsp bcopy(omh + sizeof(struct sr_meta_opt_hdr),
929 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.scr_maskkey,
930 c285a1f8 2020-11-06 stsp sizeof(sd->mds.mdd_crypto.scr_maskkey));
937 c285a1f8 2020-11-06 stsp for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
938 c285a1f8 2020-11-06 stsp omi_next = SLIST_NEXT(omi, omi_link);
939 c285a1f8 2020-11-06 stsp if (omi->omi_som)
940 c285a1f8 2020-11-06 stsp free(omi->omi_som, M_DEVBUF);
941 c285a1f8 2020-11-06 stsp free(omi, M_DEVBUF);
945 c285a1f8 2020-11-06 stsp free(sm, M_DEVBUF);
947 c285a1f8 2020-11-06 stsp if (vn && open) {
948 c285a1f8 2020-11-06 stsp VOP_CLOSE(vn, FREAD, NOCRED, curproc);
952 c285a1f8 2020-11-06 stsp return key_disk;
956 c285a1f8 2020-11-06 stsp sr_crypto_alloc_resources(struct sr_discipline *sd)
958 c285a1f8 2020-11-06 stsp struct cryptoini cri;
959 c285a1f8 2020-11-06 stsp struct sr_crypto_wu *crwu;
960 c285a1f8 2020-11-06 stsp u_int num_keys, i;
962 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
963 c285a1f8 2020-11-06 stsp DEVNAME(sd->sd_sc));
965 c285a1f8 2020-11-06 stsp for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
966 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
968 c285a1f8 2020-11-06 stsp if (sr_wu_alloc(sd)) {
969 c285a1f8 2020-11-06 stsp sr_error(sd->sd_sc, "unable to allocate work units");
970 c285a1f8 2020-11-06 stsp return (ENOMEM);
972 c285a1f8 2020-11-06 stsp if (sr_ccb_alloc(sd)) {
973 c285a1f8 2020-11-06 stsp sr_error(sd->sd_sc, "unable to allocate CCBs");
974 c285a1f8 2020-11-06 stsp return (ENOMEM);
976 c285a1f8 2020-11-06 stsp if (sr_crypto_decrypt_key(sd)) {
977 c285a1f8 2020-11-06 stsp sr_error(sd->sd_sc, "incorrect key or passphrase");
978 c285a1f8 2020-11-06 stsp return (EPERM);
982 c285a1f8 2020-11-06 stsp * For each wu allocate the uio, iovec and crypto structures.
983 c285a1f8 2020-11-06 stsp * these have to be allocated now because during runtime we can't
984 c285a1f8 2020-11-06 stsp * fail an allocation without failing the io (which can cause real
985 c285a1f8 2020-11-06 stsp * problems).
987 c285a1f8 2020-11-06 stsp mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
988 c285a1f8 2020-11-06 stsp TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
989 c285a1f8 2020-11-06 stsp for (i = 0; i < sd->sd_max_wu; i++) {
990 c285a1f8 2020-11-06 stsp crwu = malloc(sizeof(*crwu), M_DEVBUF,
991 c285a1f8 2020-11-06 stsp M_WAITOK | M_ZERO | M_CANFAIL);
992 c285a1f8 2020-11-06 stsp if (crwu == NULL)
993 c285a1f8 2020-11-06 stsp return (ENOMEM);
994 c285a1f8 2020-11-06 stsp /* put it on the list now so if we fail it'll be freed */
995 c285a1f8 2020-11-06 stsp mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
996 c285a1f8 2020-11-06 stsp TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
997 c285a1f8 2020-11-06 stsp mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
999 c285a1f8 2020-11-06 stsp crwu->cr_uio.uio_iov = &crwu->cr_iov;
1000 c285a1f8 2020-11-06 stsp crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
1001 c285a1f8 2020-11-06 stsp crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
1002 c285a1f8 2020-11-06 stsp if (crwu->cr_crp == NULL)
1003 c285a1f8 2020-11-06 stsp return (ENOMEM);
1004 c285a1f8 2020-11-06 stsp /* steal the list of cryptodescs */
1005 c285a1f8 2020-11-06 stsp crwu->cr_descs = crwu->cr_crp->crp_desc;
1006 c285a1f8 2020-11-06 stsp crwu->cr_crp->crp_desc = NULL;
1009 c285a1f8 2020-11-06 stsp bzero(&cri, sizeof(cri));
1010 c285a1f8 2020-11-06 stsp cri.cri_alg = CRYPTO_AES_XTS;
1011 c285a1f8 2020-11-06 stsp switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
1012 c285a1f8 2020-11-06 stsp case SR_CRYPTOA_AES_XTS_128:
1013 c285a1f8 2020-11-06 stsp cri.cri_klen = 256;
1015 c285a1f8 2020-11-06 stsp case SR_CRYPTOA_AES_XTS_256:
1016 c285a1f8 2020-11-06 stsp cri.cri_klen = 512;
1019 c285a1f8 2020-11-06 stsp return (EINVAL);
1022 c285a1f8 2020-11-06 stsp /* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
1023 c285a1f8 2020-11-06 stsp num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
1024 c285a1f8 2020-11-06 stsp if (num_keys >= SR_CRYPTO_MAXKEYS)
1025 c285a1f8 2020-11-06 stsp return (EFBIG);
1026 c285a1f8 2020-11-06 stsp for (i = 0; i <= num_keys; i++) {
1027 c285a1f8 2020-11-06 stsp cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
1028 c285a1f8 2020-11-06 stsp if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
1029 c285a1f8 2020-11-06 stsp &cri, 0) != 0) {
1030 c285a1f8 2020-11-06 stsp for (i = 0;
1031 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
1033 c285a1f8 2020-11-06 stsp crypto_freesession(
1034 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.scr_sid[i]);
1035 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
1037 c285a1f8 2020-11-06 stsp return (EINVAL);
1041 c285a1f8 2020-11-06 stsp sr_hotplug_register(sd, sr_crypto_hotplug);
1043 c285a1f8 2020-11-06 stsp return (0);
1047 c285a1f8 2020-11-06 stsp sr_crypto_free_resources(struct sr_discipline *sd)
1049 c285a1f8 2020-11-06 stsp struct sr_crypto_wu *crwu;
1052 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
1053 c285a1f8 2020-11-06 stsp DEVNAME(sd->sd_sc));
1055 c285a1f8 2020-11-06 stsp if (sd->mds.mdd_crypto.key_disk != NULL) {
1056 c285a1f8 2020-11-06 stsp explicit_bzero(sd->mds.mdd_crypto.key_disk, sizeof
1057 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.key_disk);
1058 c285a1f8 2020-11-06 stsp free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
1061 c285a1f8 2020-11-06 stsp sr_hotplug_unregister(sd, sr_crypto_hotplug);
1063 c285a1f8 2020-11-06 stsp for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
1064 c285a1f8 2020-11-06 stsp crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
1065 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
1068 c285a1f8 2020-11-06 stsp mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
1069 c285a1f8 2020-11-06 stsp while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
1070 c285a1f8 2020-11-06 stsp TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
1072 c285a1f8 2020-11-06 stsp if (crwu->cr_dmabuf != NULL)
1073 c285a1f8 2020-11-06 stsp dma_free(crwu->cr_dmabuf, MAXPHYS);
1074 c285a1f8 2020-11-06 stsp if (crwu->cr_crp) {
1075 c285a1f8 2020-11-06 stsp /* twiddle cryptoreq back */
1076 c285a1f8 2020-11-06 stsp crwu->cr_crp->crp_desc = crwu->cr_descs;
1077 c285a1f8 2020-11-06 stsp crypto_freereq(crwu->cr_crp);
1079 c285a1f8 2020-11-06 stsp free(crwu, M_DEVBUF);
1081 c285a1f8 2020-11-06 stsp mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
1083 c285a1f8 2020-11-06 stsp sr_wu_free(sd);
1084 c285a1f8 2020-11-06 stsp sr_ccb_free(sd);
1088 c285a1f8 2020-11-06 stsp sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
1090 c285a1f8 2020-11-06 stsp struct sr_crypto_kdfpair kdfpair;
1091 c285a1f8 2020-11-06 stsp struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
1092 c285a1f8 2020-11-06 stsp int size, rv = 1;
1094 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
1095 c285a1f8 2020-11-06 stsp DEVNAME(sd->sd_sc), bd->bd_cmd);
1097 c285a1f8 2020-11-06 stsp switch (bd->bd_cmd) {
1098 c285a1f8 2020-11-06 stsp case SR_IOCTL_GET_KDFHINT:
1100 c285a1f8 2020-11-06 stsp /* Get KDF hint for userland. */
1101 c285a1f8 2020-11-06 stsp size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
1102 c285a1f8 2020-11-06 stsp if (bd->bd_data == NULL || bd->bd_size > size)
1104 c285a1f8 2020-11-06 stsp if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
1105 c285a1f8 2020-11-06 stsp bd->bd_data, bd->bd_size))
1112 c285a1f8 2020-11-06 stsp case SR_IOCTL_CHANGE_PASSPHRASE:
1114 c285a1f8 2020-11-06 stsp /* Attempt to change passphrase. */
1116 c285a1f8 2020-11-06 stsp size = sizeof(kdfpair);
1117 c285a1f8 2020-11-06 stsp if (bd->bd_data == NULL || bd->bd_size > size)
1119 c285a1f8 2020-11-06 stsp if (copyin(bd->bd_data, &kdfpair, size))
1122 c285a1f8 2020-11-06 stsp size = sizeof(kdfinfo1);
1123 c285a1f8 2020-11-06 stsp if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
1125 c285a1f8 2020-11-06 stsp if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
1128 c285a1f8 2020-11-06 stsp size = sizeof(kdfinfo2);
1129 c285a1f8 2020-11-06 stsp if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
1131 c285a1f8 2020-11-06 stsp if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
1134 c285a1f8 2020-11-06 stsp if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
1137 c285a1f8 2020-11-06 stsp /* Save metadata to disk. */
1138 c285a1f8 2020-11-06 stsp rv = sr_meta_save(sd, SR_META_DIRTY);
1144 c285a1f8 2020-11-06 stsp explicit_bzero(&kdfpair, sizeof(kdfpair));
1145 c285a1f8 2020-11-06 stsp explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
1146 c285a1f8 2020-11-06 stsp explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));
1148 c285a1f8 2020-11-06 stsp return (rv);
1152 c285a1f8 2020-11-06 stsp sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
1154 c285a1f8 2020-11-06 stsp int rv = EINVAL;
1156 c285a1f8 2020-11-06 stsp if (om->som_type == SR_OPT_CRYPTO) {
1157 c285a1f8 2020-11-06 stsp sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
1161 c285a1f8 2020-11-06 stsp return (rv);
1165 c285a1f8 2020-11-06 stsp sr_crypto_rw(struct sr_workunit *wu)
1167 c285a1f8 2020-11-06 stsp struct sr_crypto_wu *crwu;
1168 c285a1f8 2020-11-06 stsp int s, rv = 0;
1170 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
1171 c285a1f8 2020-11-06 stsp DEVNAME(wu->swu_dis->sd_sc), wu);
1173 c285a1f8 2020-11-06 stsp if (wu->swu_xs->flags & SCSI_DATA_OUT) {
1174 c285a1f8 2020-11-06 stsp crwu = sr_crypto_wu_get(wu, 1);
1175 c285a1f8 2020-11-06 stsp if (crwu == NULL)
1176 c285a1f8 2020-11-06 stsp return (1);
1177 c285a1f8 2020-11-06 stsp crwu->cr_crp->crp_callback = sr_crypto_write;
1178 c285a1f8 2020-11-06 stsp s = splvm();
1179 c285a1f8 2020-11-06 stsp if (crypto_invoke(crwu->cr_crp))
1182 c285a1f8 2020-11-06 stsp rv = crwu->cr_crp->crp_etype;
1185 c285a1f8 2020-11-06 stsp rv = sr_crypto_rw2(wu, NULL);
1187 c285a1f8 2020-11-06 stsp return (rv);
1191 c285a1f8 2020-11-06 stsp sr_crypto_write(struct cryptop *crp)
1193 c285a1f8 2020-11-06 stsp struct sr_crypto_wu *crwu = crp->crp_opaque;
1194 c285a1f8 2020-11-06 stsp struct sr_workunit *wu = crwu->cr_wu;
1197 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
1198 c285a1f8 2020-11-06 stsp DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1200 c285a1f8 2020-11-06 stsp if (crp->crp_etype) {
1201 c285a1f8 2020-11-06 stsp /* fail io */
1202 c285a1f8 2020-11-06 stsp wu->swu_xs->error = XS_DRIVER_STUFFUP;
1203 c285a1f8 2020-11-06 stsp s = splbio();
1204 c285a1f8 2020-11-06 stsp sr_crypto_finish_io(wu);
1208 c285a1f8 2020-11-06 stsp return (sr_crypto_rw2(wu, crwu));
1212 c285a1f8 2020-11-06 stsp sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
1214 c285a1f8 2020-11-06 stsp struct sr_discipline *sd = wu->swu_dis;
1215 c285a1f8 2020-11-06 stsp struct scsi_xfer *xs = wu->swu_xs;
1216 c285a1f8 2020-11-06 stsp struct sr_ccb *ccb;
1217 c285a1f8 2020-11-06 stsp struct uio *uio;
1219 c285a1f8 2020-11-06 stsp daddr64_t blk;
1221 c285a1f8 2020-11-06 stsp if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
1224 c285a1f8 2020-11-06 stsp blk += sd->sd_meta->ssd_data_offset;
1226 c285a1f8 2020-11-06 stsp ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
1227 c285a1f8 2020-11-06 stsp if (!ccb) {
1228 c285a1f8 2020-11-06 stsp /* should never happen but handle more gracefully */
1229 c285a1f8 2020-11-06 stsp printf("%s: %s: too many ccbs queued\n",
1230 c285a1f8 2020-11-06 stsp DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
1233 c285a1f8 2020-11-06 stsp if (!ISSET(xs->flags, SCSI_DATA_IN)) {
1234 c285a1f8 2020-11-06 stsp uio = crwu->cr_crp->crp_buf;
1235 c285a1f8 2020-11-06 stsp ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
1236 c285a1f8 2020-11-06 stsp ccb->ccb_opaque = crwu;
1238 c285a1f8 2020-11-06 stsp sr_wu_enqueue_ccb(wu, ccb);
1240 c285a1f8 2020-11-06 stsp s = splbio();
1242 c285a1f8 2020-11-06 stsp if (sr_check_io_collision(wu))
1243 c285a1f8 2020-11-06 stsp goto queued;
1245 c285a1f8 2020-11-06 stsp sr_raid_startwu(wu);
1249 c285a1f8 2020-11-06 stsp return (0);
1251 c285a1f8 2020-11-06 stsp /* wu is unwound by sr_wu_put */
1253 c285a1f8 2020-11-06 stsp crwu->cr_crp->crp_etype = EINVAL;
1254 c285a1f8 2020-11-06 stsp return (1);
1258 c285a1f8 2020-11-06 stsp sr_crypto_done(struct sr_workunit *wu)
1260 c285a1f8 2020-11-06 stsp struct scsi_xfer *xs = wu->swu_xs;
1261 c285a1f8 2020-11-06 stsp struct sr_crypto_wu *crwu;
1262 c285a1f8 2020-11-06 stsp struct sr_ccb *ccb;
1265 c285a1f8 2020-11-06 stsp /* If this was a successful read, initiate decryption of the data. */
1266 c285a1f8 2020-11-06 stsp if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
1267 c285a1f8 2020-11-06 stsp /* only fails on implementation error */
1268 c285a1f8 2020-11-06 stsp crwu = sr_crypto_wu_get(wu, 0);
1269 c285a1f8 2020-11-06 stsp if (crwu == NULL)
1270 c285a1f8 2020-11-06 stsp panic("sr_crypto_intr: no wu");
1271 c285a1f8 2020-11-06 stsp crwu->cr_crp->crp_callback = sr_crypto_read;
1272 c285a1f8 2020-11-06 stsp ccb = TAILQ_FIRST(&wu->swu_ccb);
1273 c285a1f8 2020-11-06 stsp if (ccb == NULL)
1274 c285a1f8 2020-11-06 stsp panic("sr_crypto_done: no ccbs on workunit");
1275 c285a1f8 2020-11-06 stsp ccb->ccb_opaque = crwu;
1276 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke %p\n",
1277 c285a1f8 2020-11-06 stsp DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
1278 c285a1f8 2020-11-06 stsp s = splvm();
1279 c285a1f8 2020-11-06 stsp crypto_invoke(crwu->cr_crp);
1284 c285a1f8 2020-11-06 stsp s = splbio();
1285 c285a1f8 2020-11-06 stsp sr_crypto_finish_io(wu);
1290 c285a1f8 2020-11-06 stsp sr_crypto_finish_io(struct sr_workunit *wu)
1292 c285a1f8 2020-11-06 stsp struct sr_discipline *sd = wu->swu_dis;
1293 c285a1f8 2020-11-06 stsp struct scsi_xfer *xs = wu->swu_xs;
1294 c285a1f8 2020-11-06 stsp struct sr_ccb *ccb;
1295 c285a1f8 2020-11-06 stsp #ifdef SR_DEBUG
1296 c285a1f8 2020-11-06 stsp struct sr_softc *sc = sd->sd_sc;
1297 c285a1f8 2020-11-06 stsp #endif /* SR_DEBUG */
1299 c285a1f8 2020-11-06 stsp splassert(IPL_BIO);
1301 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
1302 c285a1f8 2020-11-06 stsp DEVNAME(sc), wu, xs);
1304 c285a1f8 2020-11-06 stsp if (wu->swu_cb_active == 1)
1305 c285a1f8 2020-11-06 stsp panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
1306 c285a1f8 2020-11-06 stsp TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
1307 c285a1f8 2020-11-06 stsp if (ccb->ccb_opaque == NULL)
1309 c285a1f8 2020-11-06 stsp sr_crypto_wu_put(ccb->ccb_opaque);
1312 c285a1f8 2020-11-06 stsp sr_scsi_done(sd, xs);
1316 c285a1f8 2020-11-06 stsp sr_crypto_read(struct cryptop *crp)
1318 c285a1f8 2020-11-06 stsp struct sr_crypto_wu *crwu = crp->crp_opaque;
1319 c285a1f8 2020-11-06 stsp struct sr_workunit *wu = crwu->cr_wu;
1322 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
1323 c285a1f8 2020-11-06 stsp DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1325 c285a1f8 2020-11-06 stsp if (crp->crp_etype)
1326 c285a1f8 2020-11-06 stsp wu->swu_xs->error = XS_DRIVER_STUFFUP;
1328 c285a1f8 2020-11-06 stsp s = splbio();
1329 c285a1f8 2020-11-06 stsp sr_crypto_finish_io(wu);
1332 c285a1f8 2020-11-06 stsp return (0);
1336 c285a1f8 2020-11-06 stsp sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
1338 c285a1f8 2020-11-06 stsp DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
1339 c285a1f8 2020-11-06 stsp DEVNAME(sd->sd_sc), diskp->dk_name, action);
#ifdef SR_DEBUG0
/*
 * Debug helper: hex-dump both the on-metadata key table (scm_key) and the
 * decrypted runtime key table (scr_key).  Prints raw key material -- keep
 * this compiled out (SR_DEBUG0) except when actively debugging.
 */
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int			i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif /* SR_DEBUG0 */	/* was mislabelled "SR_DEBUG"; guard is SR_DEBUG0 */