author    | Dmitry Torokhov <dtor@insightbb.com> | 2007-02-10 01:26:32 -0500
committer | Dmitry Torokhov <dtor@insightbb.com> | 2007-02-10 01:26:32 -0500
commit    | b22364c8eec89e6b0c081a237f3b6348df87796f (patch)
tree      | 233a923281fb640106465d076997ff511efb6edf /include/rdma
parent    | 2c8dc071517ec2843869024dc82be2e246f41064 (diff)
parent    | 66efc5a7e3061c3597ac43a8bb1026488d57e66b (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/rdma')
-rw-r--r-- | include/rdma/ib_marshall.h  |   5
-rw-r--r-- | include/rdma/ib_user_mad.h  |   2
-rw-r--r-- | include/rdma/ib_verbs.h     | 271
-rw-r--r-- | include/rdma/rdma_cm.h      |  62
-rw-r--r-- | include/rdma/rdma_cm_ib.h   |   3
-rw-r--r-- | include/rdma/rdma_user_cm.h | 206
6 files changed, 530 insertions, 19 deletions
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 66bf4d7..db03720 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -41,6 +41,9 @@
 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 			     struct ib_qp_attr *src);
 
+void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
+			     struct ib_ah_attr *src);
+
 void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
 			      struct ib_sa_path_rec *src);
 
diff --git a/include/rdma/ib_user_mad.h b/include/rdma/ib_user_mad.h
index 44537aa..d66b15e 100644
--- a/include/rdma/ib_user_mad.h
+++ b/include/rdma/ib_user_mad.h
@@ -98,7 +98,7 @@ struct ib_user_mad_hdr {
  */
 struct ib_user_mad {
 	struct ib_user_mad_hdr hdr;
-	__u8	data[0];
+	__u64	data[0];
 };
 
 /**
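A note on the ib_user_mad change above: widening data[] from __u8 to __u64 leaves the structure layout unchanged while guaranteeing 8-byte alignment of the MAD payload that follows the header (that reading is inferred from the types; the diff itself does not say so). Userspace reading MADs through this structure is unaffected as long as it allocates header plus payload together, roughly:

	struct ib_user_mad *mad;

	/* 256 bytes covers one unsegmented MAD; RMPP payloads are larger */
	mad = malloc(sizeof *mad + 256);
	if (read(fd, mad, sizeof *mad + 256) < 0)	/* fd: an open umad device */
		perror("read");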
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc35..765589f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,9 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/kref.h>
 
 #include <asm/atomic.h>
 #include <asm/scatterlist.h>
@@ -417,8 +420,8 @@ struct ib_wc {
 	enum ib_wc_opcode	opcode;
 	u32			vendor_err;
 	u32			byte_len;
+	struct ib_qp	       *qp;
 	__be32			imm_data;
-	u32			qp_num;
 	u32			src_qp;
 	int			wc_flags;
 	u16			pkey_index;
@@ -848,6 +851,49 @@ struct ib_cache {
 	u8                     *lmc_cache;
 };
 
+struct ib_dma_mapping_ops {
+	int		(*mapping_error)(struct ib_device *dev,
+					 u64 dma_addr);
+	u64		(*map_single)(struct ib_device *dev,
+				      void *ptr, size_t size,
+				      enum dma_data_direction direction);
+	void		(*unmap_single)(struct ib_device *dev,
+					u64 addr, size_t size,
+					enum dma_data_direction direction);
+	u64		(*map_page)(struct ib_device *dev,
+				    struct page *page, unsigned long offset,
+				    size_t size,
+				    enum dma_data_direction direction);
+	void		(*unmap_page)(struct ib_device *dev,
+				      u64 addr, size_t size,
+				      enum dma_data_direction direction);
+	int		(*map_sg)(struct ib_device *dev,
+				  struct scatterlist *sg, int nents,
+				  enum dma_data_direction direction);
+	void		(*unmap_sg)(struct ib_device *dev,
+				    struct scatterlist *sg, int nents,
+				    enum dma_data_direction direction);
+	u64		(*dma_address)(struct ib_device *dev,
+				       struct scatterlist *sg);
+	unsigned int	(*dma_len)(struct ib_device *dev,
+				   struct scatterlist *sg);
+	void		(*sync_single_for_cpu)(struct ib_device *dev,
+					       u64 dma_handle,
+					       size_t size,
+					       enum dma_data_direction dir);
+	void		(*sync_single_for_device)(struct ib_device *dev,
+						  u64 dma_handle,
+						  size_t size,
+						  enum dma_data_direction dir);
+	void		*(*alloc_coherent)(struct ib_device *dev,
+					   size_t size,
+					   u64 *dma_handle,
+					   gfp_t flag);
+	void		(*free_coherent)(struct ib_device *dev,
+					 size_t size, void *cpu_addr,
+					 u64 dma_handle);
+};
+
 struct iw_cm_verbs;
 
 struct ib_device {
@@ -992,6 +1038,8 @@ struct ib_device {
 						  struct ib_mad *in_mad,
 						  struct ib_mad *out_mad);
 
+	struct ib_dma_mapping_ops   *dma_ops;
+
 	struct module               *owner;
 	struct class_device          class_dev;
 	struct kobject               ports_parent;
@@ -1395,10 +1443,231 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  * usable for DMA.
  * @pd: The protection domain associated with the memory region.
  * @mr_access_flags: Specifies the memory access rights.
+ *
+ * Note that the ib_dma_*() functions defined below must be used
+ * to create/destroy addresses used with the Lkey or Rkey returned
+ * by ib_get_dma_mr().
  */
 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
 
 /**
+ * ib_dma_mapping_error - check a DMA addr for error
+ * @dev: The device for which the dma_addr was created
+ * @dma_addr: The DMA address to check
+ */
+static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->mapping_error(dev, dma_addr);
+	return dma_mapping_error(dma_addr);
+}
+
+/**
+ * ib_dma_map_single - Map a kernel virtual address to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @cpu_addr: The kernel virtual address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_single(struct ib_device *dev,
+				    void *cpu_addr, size_t size,
+				    enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
+	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+}
+
+/**
+ * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_single(struct ib_device *dev,
+				       u64 addr, size_t size,
+				       enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_single(dev, addr, size, direction);
+	else
+		dma_unmap_single(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_page - Map a physical page to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @page: The page to be mapped
+ * @offset: The offset within the page
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_page(struct ib_device *dev,
+				  struct page *page,
+				  unsigned long offset,
+				  size_t size,
+				  enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->map_page(dev, page, offset, size, direction);
+	return dma_map_page(dev->dma_device, page, offset, size, direction);
+}
+
+/**
+ * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_page(struct ib_device *dev,
+				     u64 addr, size_t size,
+				     enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_page(dev, addr, size, direction);
+	else
+		dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline int ib_dma_map_sg(struct ib_device *dev,
+				struct scatterlist *sg, int nents,
+				enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->map_sg(dev, sg, nents, direction);
+	return dma_map_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_sg(struct ib_device *dev,
+				   struct scatterlist *sg, int nents,
+				   enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
+	else
+		dma_unmap_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline u64 ib_sg_dma_address(struct ib_device *dev,
+				    struct scatterlist *sg)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->dma_address(dev, sg);
+	return sg_dma_address(sg);
+}
+
+/**
+ * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
+					 struct scatterlist *sg)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->dma_len(dev, sg);
+	return sg_dma_len(sg);
+}
+
+/**
+ * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
+					      u64 addr,
+					      size_t size,
+					      enum dma_data_direction dir)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
+	else
+		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
+						 u64 addr,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
+	else
+		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_alloc_coherent - Allocate memory and map it for DMA
+ * @dev: The device for which the DMA address is requested
+ * @size: The size of the region to allocate in bytes
+ * @dma_handle: A pointer for returning the DMA address of the region
+ * @flag: memory allocator flags
+ */
+static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
+					  size_t size,
+					  u64 *dma_handle,
+					  gfp_t flag)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	else {
+		dma_addr_t handle;
+		void *ret;
+
+		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
+		*dma_handle = handle;
+		return ret;
+	}
+}
+
+/**
+ * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
+ * @dev: The device for which the DMA addresses were allocated
+ * @size: The size of the region
+ * @cpu_addr: the address returned by ib_dma_alloc_coherent()
+ * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
+ */
+static inline void ib_dma_free_coherent(struct ib_device *dev,
+					size_t size, void *cpu_addr,
+					u64 dma_handle)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	else
+		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+}
+
+/**
  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
  * by an HCA.
  * @pd: The protection domain associated assigned to the registered region.
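The ib_dma_*() wrappers above dispatch through dev->dma_ops when a device supplies its own mapping table and fall back to the regular DMA API otherwise, so ULPs should call them rather than using dma_map_single() and friends on dev->dma_device directly. A minimal sketch of the intended calling pattern in a ULP (dev, buf, and len are assumed to come from the caller):

	u64 addr;

	addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... fill an ib_sge with addr/len and post the send ... */

	ib_dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);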
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index deb5a0a..36cd8a8 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -77,11 +77,34 @@ struct rdma_route {
 	int num_paths;
 };
 
+struct rdma_conn_param {
+	const void *private_data;
+	u8 private_data_len;
+	u8 responder_resources;
+	u8 initiator_depth;
+	u8 flow_control;
+	u8 retry_count;		/* ignored when accepting */
+	u8 rnr_retry_count;
+	/* Fields below ignored if a QP is created on the rdma_cm_id. */
+	u8 srq;
+	u32 qp_num;
+};
+
+struct rdma_ud_param {
+	const void *private_data;
+	u8 private_data_len;
+	struct ib_ah_attr ah_attr;
+	u32 qp_num;
+	u32 qkey;
+};
+
 struct rdma_cm_event {
 	enum rdma_cm_event_type	 event;
 	int			 status;
-	void			*private_data;
-	u8			 private_data_len;
+	union {
+		struct rdma_conn_param	conn;
+		struct rdma_ud_param	ud;
+	} param;
 };
 
 struct rdma_cm_id;
@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		      int *qp_attr_mask);
 
-struct rdma_conn_param {
-	const void *private_data;
-	u8 private_data_len;
-	u8 responder_resources;
-	u8 initiator_depth;
-	u8 flow_control;
-	u8 retry_count;		/* ignored when accepting */
-	u8 rnr_retry_count;
-	/* Fields below ignored if a QP is created on the rdma_cm_id. */
-	u8 srq;
-	u32 qp_num;
-	enum ib_qp_type qp_type;
-};
-
 /**
  * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
  *
  * Users must have resolved a route for the rdma_cm_id to connect with
  * by having called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP
+ * information for unconnected rdma_cm_id's.  The actual operation is
+ * based on the rdma_cm_id's port space.
  */
 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
@@ -253,6 +268,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
 /**
+ * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
+ * occurred on the connection.
+ * @id: Connection identifier to transition to established.
+ * @event: Asynchronous event.
+ *
+ * This routine should be invoked by users to notify the CM of relevant
+ * communication events.  Events that should be reported to the CM and
+ * when to report them are:
+ *
+ *   IB_EVENT_COMM_EST - Used when a message is received on a connected
+ *      QP before an RTU has been received.
+ */
+int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
+
+/**
  * rdma_reject - Called to reject a connection request or response.
  */
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
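With rdma_cm_event now carrying a union rather than a bare private_data pointer, consumers pick the member matching the port space. A hedged sketch of a connection-request handler after this change (the handler and its surrounding setup are illustrative, not part of the patch):

	static int cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
	{
		if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			const void *data = event->param.conn.private_data;
			u8 len = event->param.conn.private_data_len;

			/* ... validate data/len, then rdma_accept(id, &conn_param) ... */
		}
		return 0;
	}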
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index e8c3af1..9b176df 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -44,4 +44,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 		      struct ib_sa_path_rec *path_rec, int num_paths);
 
+/* Global qkey for UD QPs and multicast groups. */
+#define RDMA_UD_QKEY 0x01234567
+
 #endif /* RDMA_CM_IB_H */
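RDMA_UD_QKEY gives unconnected (UD) consumers a single well-known qkey. A consumer that received remote QP information through an rdma_cm event would plug it into a send work request along these lines (ah and event are assumed to exist; field layout is from ib_verbs.h):

	struct ib_send_wr wr;

	memset(&wr, 0, sizeof wr);
	wr.opcode            = IB_WR_SEND;
	wr.wr.ud.ah          = ah;
	wr.wr.ud.remote_qpn  = event->param.ud.qp_num;
	wr.wr.ud.remote_qkey = RDMA_UD_QKEY;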
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
new file mode 100644
index 0000000..9572ab8
--- /dev/null
+++ b/include/rdma/rdma_user_cm.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_CM_H
+#define RDMA_USER_CM_H
+
+#include <linux/types.h>
+#include <linux/in6.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_user_sa.h>
+
+#define RDMA_USER_CM_ABI_VERSION	3
+
+#define RDMA_MAX_PRIVATE_DATA		256
+
+enum {
+	RDMA_USER_CM_CMD_CREATE_ID,
+	RDMA_USER_CM_CMD_DESTROY_ID,
+	RDMA_USER_CM_CMD_BIND_ADDR,
+	RDMA_USER_CM_CMD_RESOLVE_ADDR,
+	RDMA_USER_CM_CMD_RESOLVE_ROUTE,
+	RDMA_USER_CM_CMD_QUERY_ROUTE,
+	RDMA_USER_CM_CMD_CONNECT,
+	RDMA_USER_CM_CMD_LISTEN,
+	RDMA_USER_CM_CMD_ACCEPT,
+	RDMA_USER_CM_CMD_REJECT,
+	RDMA_USER_CM_CMD_DISCONNECT,
+	RDMA_USER_CM_CMD_INIT_QP_ATTR,
+	RDMA_USER_CM_CMD_GET_EVENT,
+	RDMA_USER_CM_CMD_GET_OPTION,
+	RDMA_USER_CM_CMD_SET_OPTION,
+	RDMA_USER_CM_CMD_NOTIFY
+};
+
+/*
+ * command ABI structures.
+ */
+struct rdma_ucm_cmd_hdr {
+	__u32 cmd;
+	__u16 in;
+	__u16 out;
+};
+
+struct rdma_ucm_create_id {
+	__u64 uid;
+	__u64 response;
+	__u16 ps;
+	__u8  reserved[6];
+};
+
+struct rdma_ucm_create_id_resp {
+	__u32 id;
+};
+
+struct rdma_ucm_destroy_id {
+	__u64 response;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_destroy_id_resp {
+	__u32 events_reported;
+};
+
+struct rdma_ucm_bind_addr {
+	__u64 response;
+	struct sockaddr_in6 addr;
+	__u32 id;
+};
+
+struct rdma_ucm_resolve_addr {
+	struct sockaddr_in6 src_addr;
+	struct sockaddr_in6 dst_addr;
+	__u32 id;
+	__u32 timeout_ms;
+};
+
+struct rdma_ucm_resolve_route {
+	__u32 id;
+	__u32 timeout_ms;
+};
+
+struct rdma_ucm_query_route {
+	__u64 response;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_query_route_resp {
+	__u64 node_guid;
+	struct ib_user_path_rec ib_route[2];
+	struct sockaddr_in6 src_addr;
+	struct sockaddr_in6 dst_addr;
+	__u32 num_paths;
+	__u8 port_num;
+	__u8 reserved[3];
+};
+
+struct rdma_ucm_conn_param {
+	__u32 qp_num;
+	__u32 reserved;
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+	__u8  private_data_len;
+	__u8  srq;
+	__u8  responder_resources;
+	__u8  initiator_depth;
+	__u8  flow_control;
+	__u8  retry_count;
+	__u8  rnr_retry_count;
+	__u8  valid;
+};
+
+struct rdma_ucm_ud_param {
+	__u32 qp_num;
+	__u32 qkey;
+	struct ib_uverbs_ah_attr ah_attr;
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+	__u8  private_data_len;
+	__u8  reserved[7];
+};
+
+struct rdma_ucm_connect {
+	struct rdma_ucm_conn_param conn_param;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_listen {
+	__u32 id;
+	__u32 backlog;
+};
+
+struct rdma_ucm_accept {
+	__u64 uid;
+	struct rdma_ucm_conn_param conn_param;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_reject {
+	__u32 id;
+	__u8  private_data_len;
+	__u8  reserved[3];
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+};
+
+struct rdma_ucm_disconnect {
+	__u32 id;
+};
+
+struct rdma_ucm_init_qp_attr {
+	__u64 response;
+	__u32 id;
+	__u32 qp_state;
+};
+
+struct rdma_ucm_notify {
+	__u32 id;
+	__u32 event;
+};
+
+struct rdma_ucm_get_event {
+	__u64 response;
+};
+
+struct rdma_ucm_event_resp {
+	__u64 uid;
+	__u32 id;
+	__u32 event;
+	__u32 status;
+	union {
+		struct rdma_ucm_conn_param conn;
+		struct rdma_ucm_ud_param   ud;
+	} param;
+};
+
+#endif /* RDMA_USER_CM_H */
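These structures form the write() ABI that the userspace CM library drives through the rdma_cm character device: each request is an rdma_ucm_cmd_hdr followed by the command payload, and the kernel writes any response back through the user pointer passed in the response field. A hedged userspace sketch of issuing CREATE_ID; the device path and the RDMA_PS_TCP value (0x0106, from rdma_cm.h of this release) are conventions of the OpenFabrics stack rather than anything this header spells out, and error handling is minimal:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <rdma/rdma_user_cm.h>

	struct {
		struct rdma_ucm_cmd_hdr   hdr;
		struct rdma_ucm_create_id cmd;
	} msg;
	struct rdma_ucm_create_id_resp resp;
	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);

	memset(&msg, 0, sizeof msg);
	msg.hdr.cmd      = RDMA_USER_CM_CMD_CREATE_ID;
	msg.hdr.in       = sizeof msg.cmd;	/* payload size after the header */
	msg.hdr.out      = sizeof resp;
	msg.cmd.response = (unsigned long) &resp;
	msg.cmd.ps       = 0x0106;		/* RDMA_PS_TCP */

	if (write(fd, &msg, sizeof msg) != sizeof msg)
		perror("rdma_ucm create_id");
	/* on success, resp.id identifies the new rdma_cm_id */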