/*
 * NOTE(review): this whole file is an extraction fragment -- the original
 * source line numbers are fused onto the code text (e.g. "55careful_write")
 * and interior lines are elided, so nothing here compiles as-is.
 *
 * careful_write: bounded formatter for sysfs-style output. Formats into a
 * static PAGE_SIZE scratch buffer and copies the result into buf at
 * 'offset'; the (offset + i) > max branch body is elided here, presumably
 * it clamps or bails -- TODO confirm against the full source.
 */
55careful_write(
char *buf,
int offset,
int max,
const char *fmt, ...)
/* Static scratch buffer => not reentrant; concurrent callers would race.
 * NOTE(review): confirm all callers are serialized (sysfs show methods
 * usually are, per-attribute). */
57 static char s[PAGE_SIZE];
63 s[PAGE_SIZE - 1] =
'\0';
/* NOTE(review): vsnprintf can return a negative value on output error;
 * 'i' would then be passed to memcpy as a huge size_t. Verify the elided
 * lines (64-65, 68) guard i < 0 and the offset+i overflow case. */
66 i = vsnprintf(s, PAGE_SIZE - 1, fmt, args);
67 if((offset + i) > max)
69 memcpy(buf + offset, s, i);
/*
 * set_dek_table_entry: common sysfs 'store' backend for the per-table DEK
 * index attributes (fragment; lines 88-99 and the tail are elided).
 * Gated on CAP_SYS_ADMIN before accepting input.
 */
84set_dek_table_entry(
struct device *dev,
const char *buf,
size_t len,
dek_table_e table)
87 struct Scsi_Host *shost = class_to_shost(dev);
91 if(!capable(CAP_SYS_ADMIN))
/* FIXME(review): sscanf return value is not checked here -- if buf does
 * not parse as an int, 'index' is used uninitialized (unless an elided
 * line initializes or validates it; confirm). kstrtoint() would be the
 * idiomatic kernel replacement. */
100 sscanf(buf,
"%d", &
index);
/* sysfs 'store' wrapper: forwards to set_dek_table_entry for DEK table 0. */
117set_dek_table_entry0(
struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
119 return set_dek_table_entry(dev, buf, len,
DEK_TABLE_0);
/* sysfs 'store' wrapper: forwards to set_dek_table_entry for DEK table 1. */
131set_dek_table_entry1(
struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
133 return set_dek_table_entry(dev, buf, len,
DEK_TABLE_1);
/*
 * show_dek_table_entry: common sysfs 'show' backend dumping one DEK table
 * entry as hex (fragment; the loop headers and table-selection conditional
 * are elided). All output goes through careful_write, bounded by PAGE_SIZE.
 */
146show_dek_table_entry(
struct device *dev,
char *buf,
unsigned int table)
150 struct Scsi_Host *sh = class_to_shost(dev);
151 ag_card_t *
pCard = (ag_card_t *) sh->hostdata;
/* The two assignments below are presumably the branches of an elided
 * if/else on 'table' (DEK_MEM_INDEX_1 vs DEK_MEM_INDEX_2) -- confirm. */
160 pDekTable = pRscInfo->tiLoLevelResource.loLevelMem.mem[
DEK_MEM_INDEX_1].virtPtr;
162 pDekTable = pRscInfo->tiLoLevelResource.loLevelMem.mem[
DEK_MEM_INDEX_2].virtPtr;
163 if(pDekTable ==
NULL)
167 i += careful_write(buf, i, PAGE_SIZE,
"%4d: ",
pCard->
dek_index[table]);
170 i += careful_write(buf, i, PAGE_SIZE,
"%02x", p[j]);
172 i += careful_write(buf, i, PAGE_SIZE,
"\n");
/* sysfs 'show' wrapper: dumps DEK table 0 via show_dek_table_entry. */
194show_dek_table_entry0(
struct device *dev,
struct device_attribute *attr,
char *buf)
196 return show_dek_table_entry(dev, buf,
DEK_TABLE_0);
/* sysfs 'show' wrapper: dumps DEK table 1 via show_dek_table_entry. */
208show_dek_table_entry1(
struct device *dev,
struct device_attribute *attr,
char *buf)
210 return show_dek_table_entry(dev, buf,
DEK_TABLE_1);
/*
 * show_kek_table: sysfs 'show' dumping the KEK table as hex lines
 * (fragment; loop headers over entries/bytes are elided). Output is
 * consistently bounded via careful_write.
 */
222show_kek_table(
struct device *dev,
struct device_attribute *attr,
char *buf)
226 struct Scsi_Host *sh = class_to_shost(dev);
227 ag_card_t *
pCard = (ag_card_t *) sh->hostdata;
236 i += careful_write(buf, i, PAGE_SIZE,
"%02x", p[j]);
238 i += careful_write(buf, i, PAGE_SIZE,
"\n");
240 i += careful_write(buf, i, PAGE_SIZE,
"\n");
/*
 * show_dek_kek_map: common sysfs 'show' backend printing the DEK->KEK map
 * for one table (fragment; the per-entry loop is elided).
 */
259show_dek_kek_map(
struct device *dev,
char *buf,
unsigned int table)
262 struct Scsi_Host *sh = class_to_shost(dev);
263 ag_card_t *
pCard = (ag_card_t *) sh->hostdata;
271 i += careful_write(buf, i, PAGE_SIZE,
"Table %d\n", table);
272 i += careful_write(buf, i, PAGE_SIZE,
"=======\n");
/* FIXME(review): raw sprintf here while the rest of this file uses the
 * bounded careful_write(buf, i, PAGE_SIZE, ...) -- this write is not
 * checked against PAGE_SIZE and can run past the sysfs buffer if the
 * earlier output already filled it. Should be careful_write for
 * consistency and safety. */
276 i += sprintf(buf + i,
"\n");
/* sysfs 'show' wrapper: DEK->KEK map for table 0. */
296show_dek_kek_map0(
struct device *dev,
struct device_attribute *attr,
char *buf)
298 return show_dek_kek_map(dev, buf, 0);
/* sysfs 'show' wrapper: DEK->KEK map for table 1. */
310show_dek_kek_map1(
struct device *dev,
struct device_attribute *attr,
char *buf)
312 return show_dek_kek_map(dev, buf, 1);
/*
 * show_target_dek_map: sysfs 'show' listing per-target (chan:dev:lun) DEK
 * map entries (fragment; the chan/device/lun iteration between the two
 * REPORT_ALL_LUNS guards is elided).
 */
324show_target_dek_map(
struct device *dev,
struct device_attribute *attr,
char *buf)
329 struct list_head *lh;
330 struct Scsi_Host *sh = class_to_shost(dev);
331 ag_card_t *
pCard = (ag_card_t *) sh->hostdata;
338#ifdef REPORT_ALL_LUNS
343 list_for_each_entry(p, lh, list) {
/* NOTE(review): lbaMin/lbaMax are declared 'unsigned long long' elsewhere
 * in this extraction, but are printed with %16lx -- wrong on 32-bit
 * (and UB per C format-matching rules); should be %16llx. Confirm the
 * field types in the full header before changing. */
345 i += careful_write(buf, i, PAGE_SIZE,
" %u:%u:%u: %x %8x %8x %16lx %16lx %08x:%08x %1x\n", chan,
device,
lun, p->
dekTable, p->
dekIndex, p->
kekIndex, p->
lbaMin, p->
lbaMax, p->keyTag[1], p->keyTag[0], p->keyTagCheck);
348#ifdef REPORT_ALL_LUNS
/*
 * Fragments from (at least) two elided functions:
 *  - original lines 389-396: resolving a DEK table's physical address into
 *    *addr (the __pa vs virt_to_phys pair is presumably an #ifdef'd
 *    platform split -- confirm);
 *  - original lines 428-457: validation error paths for dek table / kek
 *    index / kek wrapper index.
 * NOTE(review): __FUNCTION__ is a GNU extension; standard C99+ (and current
 * kernel style) uses __func__.
 */
389 printf(
"%s: Unknown dek table %d\n", __FUNCTION__,
dek_table);
394 *addr = (
U32_64) __pa(&pDekTable[0]);
396 *addr = (
U32_64) virt_to_phys(&pDekTable[0]);
428 printf(
"%s: Bad dek table.\n", __FUNCTION__);
433 printf(
"%s: Bad kek index.\n", __FUNCTION__);
453 printf(
"%s: Bad kek index.\n", __FUNCTION__);
457 printf(
"%s: Bad kek wrapper index.\n", __FUNCTION__);
/*
 * agtiapi_MapDek: install one DEK map entry (chan/device/lun + LBA range ->
 * dek table/index) into the per-target encrypt map list (fragment; the
 * validation conditionals and allocation lines are elided).
 * Flow visible here: validate inputs, drop any existing entry with the same
 * lbaMin, allocate a new node from the mempool, fill it, warn on LBA-range
 * overlap with existing entries, then list_add.
 */
474agtiapi_MapDek(ag_card_t *
pCard, EncryptDeviceDekMap_t *dek_map)
479 unsigned long long lba_min, lba_max;
481 struct list_head *lh;
483 chan = dek_map->channel;
487 lba_min = dek_map->dekMapEntry[0].startLBA;
488 lba_max = dek_map->dekMapEntry[0].endLBA;
490 dek_table = dek_map->dekMapEntry[0].dek.dekTable;
491 dek_index = dek_map->dekMapEntry[0].dek.dekIndex;
495 printf(
"%s: Bad channel %d.\n", __FUNCTION__, chan);
499 printf(
"%s: Bad device %d.\n", __FUNCTION__,
device);
503 printf(
"%s: Bad lun %d.\n", __FUNCTION__,
lun);
515 printf(
"%s: Bad dek table %d.\n", __FUNCTION__,
dek_table);
/* NOTE(review): '>=' also rejects lba_min == lba_max. If endLBA is
 * inclusive, that forbids a single-block mapping -- confirm whether the
 * range is [start, end) or [start, end] in the ioctl contract. */
520 if (lba_min >= lba_max) {
521 printf(
"%s: Bad lba min and lba max: %llx %llx.\n", __FUNCTION__, lba_min, lba_max);
/* Replace-on-same-lbaMin: the _safe iterator is required because the
 * matching node is freed inside the loop. */
533 list_for_each_entry_safe(p, n, lh, list) {
534 if (p->
lbaMin == lba_min &&
542 mempool_free(p,
pCard->map_mempool);
554 printf(
"%s: Unable to allocate from memory pool.\n", __FUNCTION__);
564 p->keyTagCheck = dek_map->keytag_check;
565 memcpy(&p->keyTag, &dek_map->keytag,
sizeof(p->keyTag));
/* Overlap detection is advisory only: the new entry is still added after
 * the warning. Confirm that is the intended policy. */
568 list_for_each_entry(n, lh, list) {
577 printf(
"%s: WARNING: New entry lba range overlap: %llx - %llx vs %llx - %llx.\n", __FUNCTION__, p->
lbaMin, p->
lbaMax, n->
lbaMin, n->
lbaMax);
582 list_add(&p->
list, lh);
587 printf(
"%s: Bad flags %08x\n", __FUNCTION__, dek_map->dekMapEntry[0].flags);
/*
 * Fragments of agtiapi_SetupEncryption (HIALEAH_ENCRYPTION: query cipher
 * info via tiCOMEncryptGetInfo) and agtiapi_SetupEncryptionPools
 * (ENCRYPT_ENHANCE: create the map/ioerr caches and mempools).
 * NOTE(review): the failure message mentions uma_zcreate (a FreeBSD
 * allocator) while the surrounding code uses Linux kmem_cache/mempool
 * APIs -- the message text looks stale from a port; worth confirming.
 */
594#ifdef HIALEAH_ENCRYPTION
608 printf(
"agtiapi_SetupEncryption: HIALEAH_ENCRYPTION\n");
612 printf(
"agtiapi_SetupEncryption: HIALEAH_ENCRYPTION tiCOMEncryptGetInfo Status 0x%x\n",
status);
619 printf(
"agtiapi_SetupEncryption: HIALEAH_ENCRYPTION not set\n");
625#ifdef ENCRYPT_ENHANCE
648 printf(
"Unable to create uma_zcreate cache for encryption map mempool.\n");
655 INIT_LIST_HEAD(&
pCard->ioerr_queue);
672 printf(
"Unable to create kmem cache for encryption IO error mempool.\n");
/*
 * Fragments of agtiapi_CleanupEncryption (drain the encrypt map list) and
 * agtiapi_CleanupEncryptionPools (drain ioerr_queue, destroy mempools).
 * Teardown order visible here: free queued nodes back to their mempool
 * first, then destroy the mempool -- correct ordering.
 */
694#ifdef ENCRYPT_ENHANCE
697 struct list_head *lh;
704 list_for_each_entry_safe(p, n, lh, list) {
716#ifdef ENCRYPT_ENHANCE
737 if (
pCard->map_mempool) {
738 mempool_destroy(
pCard->map_mempool);
739 printf(
"Encryption Map mempool released.\n");
751 list_for_each_entry_safe(ioerr, tmp, &
pCard->ioerr_queue, list) {
752 list_del_init(&ioerr->
list);
753 mempool_free(ioerr,
pCard->ioerr_mempool);
757 if (
pCard->ioerr_mempool) {
758 mempool_destroy(
pCard->ioerr_mempool);
759 printf(
"Encryption IO Error mempool released.\n");
/*
 * Encryption management ioctl dispatcher (fragment; the case labels, brace
 * structure, and most error paths are elided). Dispatches on
 * hdr.MinorFunction: SET_MODE, KEK_ADD, DEK_ADD, DEK_INVALID, DEK_ASSIGN
 * (map), ERROR_QUERY. Completion-based: init_completion up front,
 * wait_for_completion before reading back ioctl_data->status (unless
 * skip_wait was set by an elided path -- confirm).
 */
782 int rv, rc = 0, skip_wait = 0;
784 IoctlTISAEncrypt_t *ioctl_data = &pIoctlPayload->body;
787 init_completion(&
pCard->ioctl_completion);
791 printf(
"%s: WARNING: Attempting encryption management update with outstanding encrypted IOs!\n", __FUNCTION__);
793printf(
"%s: Minor %d\n", __FUNCTION__, pIoctlPayload->hdr.MinorFunction);
794 switch(pIoctlPayload->hdr.MinorFunction) {
803 u32 reg_val = 0, new_cipher_mode = 0;
804 IoctlEncryptSetMode_t *set_mode = (IoctlEncryptSetMode_t *) &ioctl_data->request;
806 printf(
"%s: input %08x\n", __FUNCTION__, set_mode->securityCipherMode);
828 printf(
"%s: Setting security cipher mode to: 0x%08x\n", __FUNCTION__, reg_val);
837 IoctlEncryptKekAdd_t *kek_add = (IoctlEncryptKekAdd_t *) &ioctl_data->request;
838 printf(
"%s: Add kek at index 0x%x wrapper 0x%x format 0x%x\n", __FUNCTION__, kek_add->kekIndex, kek_add->wrapperKekIndex, kek_add->blobFormat);
/* NOTE(review): two-argument access_ok() implies kernel >= 5.0; on older
 * kernels this needs the (VERIFY_READ, ptr, size) form. access_ok alone
 * does not validate the copy -- copy_from_user below is the real check,
 * and its nonzero return (bytes NOT copied) is handled. */
841 if(access_ok(kek_add->EncryptKekBlob,
sizeof(kek_blob))) {
842 printf(
"%s: Starting copy from user %p to kernel %p\n", __FUNCTION__, kek_add->EncryptKekBlob, &kek_blob);
843 if((rv = copy_from_user(&kek_blob, kek_add->EncryptKekBlob,
sizeof(kek_blob))) != 0) {
844 printf(
"%s: Copy error, %d left\n", __FUNCTION__, rv);
851 if(agtiapi_AddKek(
pCard, kek_add->kekIndex, kek_add->wrapperKekIndex, &kek_blob) < 0) {
863 IoctlEncryptDekAdd_t *dek_add = (IoctlEncryptDekAdd_t *) &ioctl_data->request;
867 bit32 blob_format = dek_add->dekBlobFormat;
868 bit32 entry_sz = dek_add->dekTableKeyEntrySize;
871 memset(addr_table, 0,
sizeof(addr_table));
873 printf(
"%s: Add dek at index 0x%x, table %x, kek index %x, blob format %x, entry size %x\n", __FUNCTION__,
dek_index,
dek_table,
kek_index, blob_format, entry_sz);
876 if(access_ok(dek_add->dekBlob,
sizeof(dek_blob))) {
877 printf(
"%s: Starting copy from user %p to kernel %p\n", __FUNCTION__, dek_add->dekBlob, &dek_blob);
878 if((rv = copy_from_user(&dek_blob, dek_add->dekBlob,
sizeof(dek_blob))) != 0) {
879 printf(
"%s: Copy error, %d left\n", __FUNCTION__, rv);
887 memcpy(addr_table, &addr,
sizeof(addr));
903 IoctlEncryptDekInvalidate_t *dek_to_invalidate = (IoctlEncryptDekInvalidate_t *) &ioctl_data->request;
904 printf(
"%s: Invalidating dek at index 0x%x, table %x\n", __FUNCTION__, dek_to_invalidate->dek.dekIndex, dek_to_invalidate->dek.dekTable);
917 IoctlEncryptDekMapTable_t *p_dek_map = (IoctlEncryptDekMapTable_t *) &ioctl_data->request;
/* NOTE(review): startLBA/endLBA are printed with %llx -- confirm those
 * fields really are 64-bit in IoctlEncryptDekMapTable_t. */
922 printf(
"%s: Host %u: Mapping %u:%u:%u (%llx to %llx) to dek at index 0x%x, table %x, keytag %08x:%08x\n", __FUNCTION__, p_dek_map->dekMap[0].host, p_dek_map->dekMap[0].channel, p_dek_map->dekMap[0].device, p_dek_map->dekMap[0].lun, p_dek_map->dekMap[0].dekMapEntry[0].startLBA, p_dek_map->dekMap[0].dekMapEntry[0].endLBA, p_dek_map->dekMap[0].dekMapEntry[0].dek.dekIndex, p_dek_map->dekMap[0].dekMapEntry[0].dek.dekTable, p_dek_map->dekMap[0].keytag[1], p_dek_map->dekMap[0].keytag[0]);
925 if (agtiapi_MapDek(
pCard, &p_dek_map->dekMap[0]) < 0) {
934 ioctl_data->subEvent = 0;
/* ERROR_QUERY: drain up to the mask width of queued IO errors into the
 * reply; if none and the caller asked to block, sleep on ioerr_waitq and
 * retry via the elided error_query_restart label. */
939 unsigned long flags, i, query_flag;
941 IoctlEncryptErrorQuery_t *perr = (IoctlEncryptErrorQuery_t *) &ioctl_data->request;
943 printf(
"%s: query flag %x\n", __FUNCTION__, perr->query_flag);
944 query_flag = perr->query_flag;
947 memset(perr, 0,
sizeof(IoctlEncryptErrorQuery_t));
956 list_for_each_entry_safe(ioerr, tmp, &
pCard->ioerr_queue, list) {
960 perr->valid_mask |= (1 << i);
961 memcpy(&perr->error[i], &ioerr->ioerr,
sizeof(IoctlEncryptIOError_t));
962 list_del_init(&ioerr->
list);
963 mempool_free(ioerr,
pCard->ioerr_mempool);
972 if (!perr->valid_mask) {
975 if (wait_event_interruptible(ioerr_waitq, (
atomic_read(&ioerr_queue_count)))) {
980 goto error_query_restart;
988 ioctl_data->subEvent = 0;
992 printf(
"%s: Unrecognized Minor Function %d\n", __FUNCTION__, pIoctlPayload->hdr.MinorFunction);
1002 wait_for_completion(&
pCard->ioctl_completion);
1004 pIoctlPayload->hdr.Status = ioctl_data->status;
1010 printf(
"%s: Status: %d\n", __FUNCTION__, rc);
1015 printf(
"%s: Encryption ioctl %d successful.\n", __FUNCTION__, pIoctlPayload->hdr.MinorFunction);
/*
 * Fragments of two routines: the pccb flag clear (original line 1079)
 * belongs to an earlier cleanup path; the ENCRYPT_ENHANCE block is the
 * encrypted-IO failure handler, which builds an IoctlEncryptIOError record
 * from the failed scsi_cmnd, queues it on pCard->ioerr_queue (bounded by
 * an elided queue-depth check), and wakes any ERROR_QUERY waiter.
 */
1079 pccb->
flags &= ~ENCRYPTED_IO;
1081#ifdef ENCRYPT_ENHANCE
1093 unsigned long flags, qdepth;
1094 struct scsi_cmnd *cmd;
1100 printf(
"%s: Malformed pccb %p.\n", __FUNCTION__, pccb);
/* NOTE(review): scsi_cmnd->serial_number and ->jiffies_at_alloc were
 * removed from mainline kernels (~5.x); this code only builds against
 * older kernels. Confirm the target kernel range. */
1108 printf(
"%s: Skipping IO %lx: Not Encrypted.\n", __FUNCTION__, cmd->serial_number);
1115 printf(
"%s: Not queueing IO error due to queue full: %lu entries.\n", __FUNCTION__, qdepth);
1123 printf(
"%s: Mempool allocation failure.\n", __FUNCTION__);
1128 perr->ioerr.error_id = cmd->serial_number;
1129 perr->ioerr.timestamp = cmd->jiffies_at_alloc;
1130 perr->ioerr.host = (
unsigned int) cmd->device->host->host_no;
1131 perr->ioerr.channel = cmd->device->channel;
1132 perr->ioerr.device = cmd->device->id;
1133 perr->ioerr.lun = cmd->device->lun;
1134 perr->ioerr.scsi_cmd = (
unsigned int) cmd->cmnd[0];
/* Unknown completion status falls through to a catch-all error_type. */
1149 printf(
"%s: Unrecognized encrypted IO completion error status: %d\n", __FUNCTION__, pccb->
scsiStatus);
1150 perr->ioerr.error_type = 0xffffffff;
1156 list_add_tail(&perr->
list, &
pCard->ioerr_queue);
1161 wake_up_interruptible(&ioerr_waitq);
#define AGTIAPI_MAX_CHANNEL_NUM
atomic_t outstanding_encrypted_io_count
#define DEK_INDEX_INVALID
#define ENCRYPTION_MAP_MEMPOOL_SIZE
#define CIPHER_MODE_INVALID
#define ENCRYPTION_IO_ERR_MEMPOOL_SIZE
#define KEK_TABLE_MAX_ENTRY
#define ENCRYPT_DEK_MAP_ENTRY_VALID
#define ERROR_QUERY_FLAG_BLOCK
IoctlEncryptErrorQuery_t encryptErrorQuery
#define ENCRYPT_DEK_MAP_ENTRY_CLEAR
#define DEK_MAX_TABLE_ITEMS
#define MAP_TABLE_ENTRY(pC, c, d, l)
void agtiapi_CleanupEncryptedIO(struct agtiapi_softc *pCard, ccb_t *pccb)
int agtiapi_SetupEncryptionPools(struct agtiapi_softc *pCard)
struct agtiapi_softc * pCard
#define IOERR_QUEUE_DEPTH_MAX
void agtiapi_HandleEncryptedIOFailure(ag_device_t *pDev, ccb_t *pccb)
void agtiapi_CleanupEncryption(struct agtiapi_softc *pCard)
int agtiapi_SetupEncryptedIO(struct agtiapi_softc *pCard, ccb_t *pccb, unsigned long long block)
int agtiapi_SetupEncryption(struct agtiapi_softc *pCard)
void agtiapi_CleanupEncryptionPools(struct agtiapi_softc *pCard)
osGLOBAL void ostiCacheFlush(tiRoot_t *ptiRoot, void *osMemHandle, void *virtPtr, bit32 length)
tiSuperScsiInitiatorRequest_t tiSuperScsiRequest
ag_resource_info_t tiRscInfo
tiLoLevelResource_t tiLoLevelResource
unsigned long long lbaMin
unsigned long long lbaMax
tiEncryptKekBlob_t kekBlob
struct kmem_cache * map_cache
char ioerr_cache_name[32]
ag_dek_kek_map_t dek_kek_map[DEK_MAX_TABLES][DEK_MAX_TABLE_ITEMS]
struct list_head * encrypt_map
struct mtx ioerr_queue_lock
ag_card_info_t * pCardInfo
ag_kek_table_t kek_table[KEK_TABLE_MAX_ENTRY]
struct kmem_cache * ioerr_cache
tiMem_t mem[MAX_LL_LAYER_MEM_DESCRIPTORS]
tiLoLevelMem_t loLevelMem
#define IOCTL_MN_ENCRYPTION_DEK_ADD
#define IOCTL_MN_ENCRYPTION_GET_INFO
#define IOCTL_MN_ENCRYPTION_KEK_NVRAM
#define IOCTL_MN_ENCRYPTION_DEK_ASSIGN
#define IOCTL_MN_ENCRYPTION_ERROR_QUERY
#define IOCTL_MN_ENCRYPTION_KEK_ADD
#define IOCTL_MN_ENCRYPTION_SET_MODE
#define IOCTL_MN_ENCRYPTION_DEK_INVALID
osGLOBAL bit32 tiCOMEncryptGetInfo(tiRoot_t *tiRoot)
osGLOBAL bit32 tiCOMEncryptSetMode(tiRoot_t *tiRoot, bit32 securityCipherMode)
osGLOBAL bit32 tiCOMEncryptKekAdd(tiRoot_t *tiRoot, bit32 kekIndex, bit32 wrapperKekIndex, bit32 blobFormat, tiEncryptKekBlob_t *encryptKekBlob)
osGLOBAL bit32 tiCOMEncryptDekAdd(tiRoot_t *tiRoot, bit32 kekIndex, bit32 dekTableSelect, bit32 dekAddrHi, bit32 dekAddrLo, bit32 dekIndex, bit32 dekNumberOfEntries, bit32 dekBlobFormat, bit32 dekTableKeyEntrySize)
osGLOBAL bit32 tiCOMEncryptDekInvalidate(tiRoot_t *tiRoot, bit32 dekTable, bit32 dekIndex)
#define TI_ENCRYPT_SEC_MODE_B
@ tiDetailDekKeyCacheMiss
#define TI_ENCRYPT_SEC_MODE_A
#define TI_ENCRYPT_ATTRIB_CIPHER_XTS
#define IOCTL_ERR_STATUS_NOT_SUPPORTED
#define IOCTL_ERR_STATUS_INVALID_CODE
#define TI_SCSI_INITIATOR_ENCRYPT
#define TI_ENCRYPT_MODE_XTS_AES
#define TI_ENCRYPT_SEC_MODE_FACT_INIT
#define IOCTL_CALL_SUCCESS