Diffstat (limited to 'drivers/crypto')
 -rw-r--r--  drivers/crypto/caam/key_gen.c                      | 29
 -rw-r--r--  drivers/crypto/nx/nx-842.c                         |  4
 -rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h  |  3
 -rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c      | 12
 -rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c           |  7
 -rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c         |  8
 -rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_admin.c        |  2
 -rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c          | 32
 -rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_isr.c          |  2
 9 files changed, 48 insertions(+), 51 deletions(-)
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 871703c..e1eaf4f 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
- int ret = 0;
+ int ret = -ENOMEM;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
- return -ENOMEM;
+ return ret;
}
- init_job_desc(desc, 0);
-
dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_in)) {
dev_err(jrdev, "unable to map key input memory\n");
- kfree(desc);
- return -ENOMEM;
+ goto out_free;
}
+
+ dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_out)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+ goto out_unmap_in;
+ }
+
+ init_job_desc(desc, 0);
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_out)) {
- dev_err(jrdev, "unable to map key output memory\n");
- kfree(desc);
- return -ENOMEM;
- }
append_fifo_store(desc, dma_addr_out, split_key_len,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
DMA_FROM_DEVICE);
+out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
-
+out_free:
kfree(desc);
-
return ret;
}
EXPORT_SYMBOL(gen_split_key);
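A minimal sketch, not taken from the patch, of the unwind ordering this hunk moves to: perform every DMA mapping before the descriptor is built, and release resources in reverse order through labelled exits. The buffer sizes and the job-submission step are placeholders.

/*
 * Minimal sketch, not from the patch: map both buffers up front, then
 * unwind through labelled exits so each early error path undoes exactly
 * what was already set up.
 */
static int map_both_then_run(struct device *dev, void *in, size_t in_len,
			     void *out, size_t out_len)
{
	dma_addr_t addr_in, addr_out;
	int ret = -ENOMEM;

	addr_in = dma_map_single(dev, in, in_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr_in))
		return ret;

	addr_out = dma_map_single(dev, out, out_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr_out))
		goto out_unmap_in;

	/* build the descriptor and submit the job here */
	ret = 0;

	dma_unmap_single(dev, addr_out, out_len, DMA_FROM_DEVICE);
out_unmap_in:
	dma_unmap_single(dev, addr_in, in_len, DMA_TO_DEVICE);
	return ret;
}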
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 061407d..887196e 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1009,9 +1009,9 @@ error_out:
* notifier_to_errno() to decode this value
*/
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
- void *update)
+ void *data)
{
- struct of_prop_reconfig *upd = update;
+ struct of_reconfig_data *upd = data;
struct nx842_devdata *local_devdata;
struct device_node *node = NULL;
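For context, a minimal sketch, not from this patch, of an OF reconfig notifier consuming the struct of_reconfig_data payload the hunk switches to; the field usage (dn, prop) follows current mainline and is an assumption here.

/*
 * Minimal sketch, not from the patch: read the of_reconfig_data payload
 * handed to an OF reconfig notifier. Field usage is an assumption based
 * on current mainline.
 */
static int example_of_notifier(struct notifier_block *nb, unsigned long action,
			       void *data)
{
	struct of_reconfig_data *rd = data;

	if (action == OF_RECONFIG_UPDATE_PROPERTY && rd->dn)
		pr_debug("property %s updated on %s\n",
			 rd->prop->name, rd->dn->full_name);

	return NOTIFY_OK;
}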
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 62f43ab..2ed4256 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -196,8 +196,7 @@ struct adf_accel_dev {
struct dentry *debugfs_dir;
struct list_head list;
struct module *owner;
- uint8_t accel_id;
- uint8_t numa_node;
struct adf_accel_pci accel_pci_dev;
+ uint8_t accel_id;
} __packed;
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 5890992..7dd54aa 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -420,9 +420,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
ring = &bank->rings[i];
if (hw_data->tx_rings_mask & (1 << i)) {
- ring->inflights = kzalloc_node(sizeof(atomic_t),
- GFP_KERNEL,
- accel_dev->numa_node);
+ ring->inflights =
+ kzalloc_node(sizeof(atomic_t),
+ GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
if (!ring->inflights)
goto err;
} else {
@@ -470,13 +471,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
int i, ret;
etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data)
return -ENOMEM;
num_banks = GET_MAX_BANKS(accel_dev);
size = num_banks * sizeof(struct adf_etr_bank_data);
- etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+ etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data->banks) {
ret = -ENOMEM;
goto err_bank;
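A minimal sketch, not from the patch, of the allocation pattern these hunks converge on: derive the NUMA node from the struct device at call time instead of caching a node id in the driver.

/*
 * Minimal sketch, not from the patch: NUMA-local allocation keyed off the
 * device. dev_to_node() returns NUMA_NO_NODE (-1) when the node is
 * unknown, in which case kzalloc_node() falls back to any node.
 */
static void *alloc_near_device(struct pci_dev *pdev, size_t size)
{
	return kzalloc_node(size, GFP_KERNEL, dev_to_node(&pdev->dev));
}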
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 31076ea..19eea1c 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -599,7 +599,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(!n))
return -EINVAL;
- bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+ bufl = kmalloc_node(sz, GFP_ATOMIC,
+ dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!bufl))
return -ENOMEM;
@@ -608,6 +609,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
goto err;
for_each_sg(assoc, sg, assoc_n, i) {
+ if (!sg->length)
+ continue;
bufl->bufers[bufs].addr = dma_map_single(dev,
sg_virt(sg),
sg->length,
@@ -643,7 +646,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers;
buflout = kmalloc_node(sz, GFP_ATOMIC,
- inst->accel_dev->numa_node);
+ dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err;
bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 0d59bcb..828f2a6 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list);
- if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+ if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
+ dev_to_node(&GET_DEV(accel_dev)) < 0)
+ && adf_dev_started(accel_dev))
break;
accel_dev = NULL;
}
if (!accel_dev) {
- pr_err("QAT: Could not find device on give node\n");
+ pr_err("QAT: Could not find device on node %d\n", node);
accel_dev = adf_devmgr_get_first();
}
if (!accel_dev || !adf_dev_started(accel_dev))
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
for (i = 0; i < num_inst; i++) {
inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!inst)
goto err;
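A minimal sketch, not from the patch, of the selection rule this hunk implements: prefer a started device on the requested node, also accept one whose node is unknown, and let the caller fall back to the first device otherwise.

/*
 * Minimal sketch, not from the patch: pick a started device on the
 * requested node, or one whose node is unknown (dev_to_node() < 0).
 * The caller falls back to adf_devmgr_get_first() on a NULL return.
 */
static struct adf_accel_dev *pick_dev_for_node(struct list_head *head, int node)
{
	struct adf_accel_dev *accel_dev;

	list_for_each_entry(accel_dev, head, list) {
		int dev_node = dev_to_node(&GET_DEV(accel_dev));

		if ((dev_node == node || dev_node < 0) &&
		    adf_dev_started(accel_dev))
			return accel_dev;
	}
	return NULL;
}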
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
index 978d6c5..53c491b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
uint64_t reg_val;
admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!admin)
return -ENOMEM;
admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 0d0435a..948f66b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev);
}
-static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
-{
- unsigned int bus_per_cpu = 0;
- struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
-
- if (!c->phys_proc_id)
- return 0;
-
- bus_per_cpu = 256 / (c->phys_proc_id + 1);
-
- if (bus_per_cpu != 0)
- return pdev->bus->number / bus_per_cpu;
- return 0;
-}
-
static int qat_dev_start(struct adf_accel_dev *accel_dev)
{
int cpus = num_online_cpus();
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *pmisc_bar_addr = NULL;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- uint8_t node;
int ret;
switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- node = adf_get_dev_node_id(pdev);
- accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /* If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow. */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
- accel_dev->numa_node = node;
INIT_LIST_HEAD(&accel_dev->crypto_list);
/* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
- hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
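A minimal sketch, not from the patch, isolating the probe-time check this hunk adds: on a multi-node system, refuse to bind to a device whose NUMA node reports no memory rather than silently serving it from a remote node.

/*
 * Minimal sketch, not from the patch: reject a device on a memoryless or
 * unknown node when the system actually has more than one node.
 */
static int check_device_node(struct pci_dev *pdev)
{
	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}
	return 0;
}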
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index c9212b9..fe8f896 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
uint32_t msix_num_entries = hw_data->num_banks + 1;
entries = kzalloc_node(msix_num_entries * sizeof(*entries),
- GFP_KERNEL, accel_dev->numa_node);
+ GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
if (!entries)
return -ENOMEM;