author     Takashi Iwai <tiwai@suse.de>    2011-01-24 18:41:10 +0100
committer  Takashi Iwai <tiwai@suse.de>    2011-01-24 18:41:10 +0100
commit     49c6ad430d74fb7995990be0f66165e4b94a6bc5 (patch)
tree       9a0b4d5158cea625efd1f4185cdea79fe9f10d85 /fs
parent     233d84c46c2253d13e10b42d88c14748fbb67a98 (diff)
parent     1bae4ce27c9c90344f23c65ea6966c50ffeae2f5 (diff)
Merge commit 'v2.6.38-rc2' into topic/misc
Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig              |    2
-rw-r--r--  fs/cifs/cifs_debug.c    |   10
-rw-r--r--  fs/cifs/cifs_fs_sb.h    |    1
-rw-r--r--  fs/cifs/cifs_unicode.c  |  127
-rw-r--r--  fs/cifs/cifsacl.c       |   13
-rw-r--r--  fs/cifs/cifsfs.c        |   44
-rw-r--r--  fs/cifs/cifsfs.h        |   15
-rw-r--r--  fs/cifs/cifsglob.h      |   64
-rw-r--r--  fs/cifs/cifspdu.h       |   62
-rw-r--r--  fs/cifs/cifsproto.h     |    9
-rw-r--r--  fs/cifs/cifssmb.c       |  113
-rw-r--r--  fs/cifs/connect.c       |  190
-rw-r--r--  fs/cifs/file.c          |  289
-rw-r--r--  fs/cifs/inode.c         |    8
-rw-r--r--  fs/cifs/misc.c          |   73
-rw-r--r--  fs/cifs/netmisc.c       |    4
-rw-r--r--  fs/cifs/sess.c          |   15
-rw-r--r--  fs/cifs/transport.c     |  434
-rw-r--r--  fs/direct-io.c          |   10
-rw-r--r--  fs/ext3/super.c         |   25
-rw-r--r--  fs/ext4/super.c         |   25
-rw-r--r--  fs/gfs2/inode.c         |   72
-rw-r--r--  fs/gfs2/inode.h         |    1
-rw-r--r--  fs/gfs2/super.c         |    1
-rw-r--r--  fs/ocfs2/super.c        |    5
-rw-r--r--  fs/pipe.c               |   10
-rw-r--r--  fs/proc/Kconfig         |    6
-rw-r--r--  fs/quota/dquot.c        |   18
-rw-r--r--  fs/quota/quota.c        |   41
-rw-r--r--  fs/reiserfs/super.c     |   17
-rw-r--r--  fs/sysfs/Kconfig        |    2
31 files changed, 946 insertions, 760 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index 9a7921a..3db9caa 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -50,7 +50,7 @@ config EXPORTFS
tristate
config FILE_LOCKING
- bool "Enable POSIX file locking API" if EMBEDDED
+ bool "Enable POSIX file locking API" if EXPERT
default y
help
This option enables standard file locking support, required
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index ede9830..65829d3 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -79,11 +79,11 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+ cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d",
mid_entry->midState,
(int)mid_entry->command,
mid_entry->pid,
- mid_entry->tsk,
+ mid_entry->callback_data,
mid_entry->mid);
#ifdef CONFIG_CIFS_STATS2
cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
@@ -218,11 +218,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
mid_entry = list_entry(tmp3, struct mid_q_entry,
qhead);
seq_printf(m, "\tState: %d com: %d pid:"
- " %d tsk: %p mid %d\n",
+ " %d cbdata: %p mid %d\n",
mid_entry->midState,
(int)mid_entry->command,
mid_entry->pid,
- mid_entry->tsk,
+ mid_entry->callback_data,
mid_entry->mid);
}
spin_unlock(&GlobalMid_Lock);
@@ -331,7 +331,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
atomic_read(&totSmBufAllocCount));
#endif /* CONFIG_CIFS_STATS2 */
- seq_printf(m, "Operations (MIDs): %d\n", midCount.counter);
+ seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
seq_printf(m,
"\n%d session %d share reconnects\n",
tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 7852cd6..ac51cd2 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -40,6 +40,7 @@
#define CIFS_MOUNT_FSCACHE 0x8000 /* local caching enabled */
#define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */
#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
+#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
struct cifs_sb_info {
struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 430f510..fc0fd4f 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -44,10 +44,14 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
int charlen, outlen = 0;
int maxwords = maxbytes / 2;
char tmp[NLS_MAX_CHARSET_SIZE];
+ __u16 ftmp;
- for (i = 0; i < maxwords && from[i]; i++) {
- charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp,
- NLS_MAX_CHARSET_SIZE);
+ for (i = 0; i < maxwords; i++) {
+ ftmp = get_unaligned_le16(&from[i]);
+ if (ftmp == 0)
+ break;
+
+ charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
if (charlen > 0)
outlen += charlen;
else
@@ -58,9 +62,9 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
}
/*
- * cifs_mapchar - convert a little-endian char to proper char in codepage
+ * cifs_mapchar - convert a host-endian char to proper char in codepage
* @target - where converted character should be copied
- * @src_char - 2 byte little-endian source character
+ * @src_char - 2 byte host-endian source character
* @cp - codepage to which character should be converted
* @mapchar - should character be mapped according to mapchars mount option?
*
@@ -69,7 +73,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
* enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
*/
static int
-cifs_mapchar(char *target, const __le16 src_char, const struct nls_table *cp,
+cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
bool mapchar)
{
int len = 1;
@@ -82,7 +86,7 @@ cifs_mapchar(char *target, const __le16 src_char, const struct nls_table *cp,
* build_path_from_dentry are modified, as they use slash as
* separator.
*/
- switch (le16_to_cpu(src_char)) {
+ switch (src_char) {
case UNI_COLON:
*target = ':';
break;
@@ -109,8 +113,7 @@ out:
return len;
cp_convert:
- len = cp->uni2char(le16_to_cpu(src_char), target,
- NLS_MAX_CHARSET_SIZE);
+ len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
if (len <= 0) {
*target = '?';
len = 1;
@@ -149,6 +152,7 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
int nullsize = nls_nullsize(codepage);
int fromwords = fromlen / 2;
char tmp[NLS_MAX_CHARSET_SIZE];
+ __u16 ftmp;
/*
* because the chars can be of varying widths, we need to take care
@@ -158,19 +162,23 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
*/
safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
- for (i = 0; i < fromwords && from[i]; i++) {
+ for (i = 0; i < fromwords; i++) {
+ ftmp = get_unaligned_le16(&from[i]);
+ if (ftmp == 0)
+ break;
+
/*
* check to see if converting this character might make the
* conversion bleed into the null terminator
*/
if (outlen >= safelen) {
- charlen = cifs_mapchar(tmp, from[i], codepage, mapchar);
+ charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar);
if ((outlen + charlen) > (tolen - nullsize))
break;
}
/* put converted char into 'to' buffer */
- charlen = cifs_mapchar(&to[outlen], from[i], codepage, mapchar);
+ charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
outlen += charlen;
}
@@ -193,24 +201,21 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
{
int charlen;
int i;
- wchar_t *wchar_to = (wchar_t *)to; /* needed to quiet sparse */
+ wchar_t wchar_to; /* needed to quiet sparse */
for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
-
- /* works for 2.4.0 kernel or later */
- charlen = codepage->char2uni(from, len, &wchar_to[i]);
+ charlen = codepage->char2uni(from, len, &wchar_to);
if (charlen < 1) {
- cERROR(1, "strtoUCS: char2uni of %d returned %d",
- (int)*from, charlen);
+ cERROR(1, "strtoUCS: char2uni of 0x%x returned %d",
+ *from, charlen);
/* A question mark */
- to[i] = cpu_to_le16(0x003f);
+ wchar_to = 0x003f;
charlen = 1;
- } else
- to[i] = cpu_to_le16(wchar_to[i]);
-
+ }
+ put_unaligned_le16(wchar_to, &to[i]);
}
- to[i] = 0;
+ put_unaligned_le16(0, &to[i]);
return i;
}
@@ -252,3 +257,79 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
return dst;
}
+/*
+ * Convert 16 bit Unicode pathname to wire format from string in current code
+ * page. Conversion may involve remapping up the six characters that are
+ * only legal in POSIX-like OS (if they are present in the string). Path
+ * names are little endian 16 bit Unicode on the wire
+ */
+int
+cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+ const struct nls_table *cp, int mapChars)
+{
+ int i, j, charlen;
+ int len_remaining = maxlen;
+ char src_char;
+ __u16 temp;
+
+ if (!mapChars)
+ return cifs_strtoUCS(target, source, PATH_MAX, cp);
+
+ for (i = 0, j = 0; i < maxlen; j++) {
+ src_char = source[i];
+ switch (src_char) {
+ case 0:
+ put_unaligned_le16(0, &target[j]);
+ goto ctoUCS_out;
+ case ':':
+ temp = UNI_COLON;
+ break;
+ case '*':
+ temp = UNI_ASTERIK;
+ break;
+ case '?':
+ temp = UNI_QUESTION;
+ break;
+ case '<':
+ temp = UNI_LESSTHAN;
+ break;
+ case '>':
+ temp = UNI_GRTRTHAN;
+ break;
+ case '|':
+ temp = UNI_PIPE;
+ break;
+ /*
+ * FIXME: We can not handle remapping backslash (UNI_SLASH)
+ * until all the calls to build_path_from_dentry are modified,
+ * as they use backslash as separator.
+ */
+ default:
+ charlen = cp->char2uni(source+i, len_remaining,
+ &temp);
+ /*
+ * if no match, use question mark, which at least in
+ * some cases serves as wild card
+ */
+ if (charlen < 1) {
+ temp = 0x003f;
+ charlen = 1;
+ }
+ len_remaining -= charlen;
+ /*
+ * character may take more than one byte in the source
+ * string, but will take exactly two bytes in the
+ * target string
+ */
+ i += charlen;
+ continue;
+ }
+ put_unaligned_le16(temp, &target[j]);
+ i++; /* move to next char in source string */
+ len_remaining--;
+ }
+
+ctoUCS_out:
+ return i;
+}
+
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index a437ec3..1e7636b 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -41,9 +41,12 @@ static struct cifs_wksid wksidarr[NUM_WK_SIDS] = {
;
-/* security id for everyone */
+/* security id for everyone/world system group */
static const struct cifs_sid sid_everyone = {
1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+/* security id for Authenticated Users system group */
+static const struct cifs_sid sid_authusers = {
+ 1, 1, {0, 0, 0, 0, 0, 5}, {11} };
/* group users */
static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
@@ -365,7 +368,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
if (num_aces > 0) {
umode_t user_mask = S_IRWXU;
umode_t group_mask = S_IRWXG;
- umode_t other_mask = S_IRWXO;
+ umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
GFP_KERNEL);
@@ -390,6 +393,12 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
ppace[i]->type,
&fattr->cf_mode,
&other_mask);
+ if (compare_sids(&(ppace[i]->sid), &sid_authusers))
+ access_flags_to_mode(ppace[i]->access_req,
+ ppace[i]->type,
+ &fattr->cf_mode,
+ &other_mask);
+
/* memcpy((void *)(&(cifscred->aces[i])),
(void *)ppace[i],
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d9f652a..a8323f1 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -77,7 +77,11 @@ unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
"Default: 50 Range: 2 to 256");
-
+unsigned short echo_retries = 5;
+module_param(echo_retries, ushort, 0644);
+MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
+ "reconnecting server. Default: 5. 0 means "
+ "never reconnect.");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
@@ -729,6 +733,25 @@ const struct file_operations cifs_file_ops = {
.setlease = cifs_setlease,
};
+const struct file_operations cifs_file_strict_ops = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = cifs_strict_readv,
+ .aio_write = cifs_file_aio_write,
+ .open = cifs_open,
+ .release = cifs_close,
+ .lock = cifs_lock,
+ .fsync = cifs_strict_fsync,
+ .flush = cifs_flush,
+ .mmap = cifs_file_strict_mmap,
+ .splice_read = generic_file_splice_read,
+ .llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+ .unlocked_ioctl = cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+ .setlease = cifs_setlease,
+};
+
const struct file_operations cifs_file_direct_ops = {
/* no aio, no readv -
BB reevaluate whether they can be done with directio, no cache */
@@ -747,6 +770,7 @@ const struct file_operations cifs_file_direct_ops = {
.llseek = cifs_llseek,
.setlease = cifs_setlease,
};
+
const struct file_operations cifs_file_nobrl_ops = {
.read = do_sync_read,
.write = do_sync_write,
@@ -765,6 +789,24 @@ const struct file_operations cifs_file_nobrl_ops = {
.setlease = cifs_setlease,
};
+const struct file_operations cifs_file_strict_nobrl_ops = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = cifs_strict_readv,
+ .aio_write = cifs_file_aio_write,
+ .open = cifs_open,
+ .release = cifs_close,
+ .fsync = cifs_strict_fsync,
+ .flush = cifs_flush,
+ .mmap = cifs_file_strict_mmap,
+ .splice_read = generic_file_splice_read,
+ .llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+ .unlocked_ioctl = cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+ .setlease = cifs_setlease,
+};
+
const struct file_operations cifs_file_direct_nobrl_ops = {
/* no mmap, no aio, no readv -
BB reevaluate whether they can be done with directio, no cache */
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 851030f..f23206d 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -61,6 +61,7 @@ extern int cifs_rename(struct inode *, struct dentry *, struct inode *,
struct dentry *);
extern int cifs_revalidate_file(struct file *filp);
extern int cifs_revalidate_dentry(struct dentry *);
+extern void cifs_invalidate_mapping(struct inode *inode);
extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int cifs_setattr(struct dentry *, struct iattr *);
@@ -72,19 +73,25 @@ extern const struct inode_operations cifs_dfs_referral_inode_operations;
/* Functions related to files and directories */
extern const struct file_operations cifs_file_ops;
extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
-extern const struct file_operations cifs_file_nobrl_ops;
-extern const struct file_operations cifs_file_direct_nobrl_ops; /* no brlocks */
+extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
+extern const struct file_operations cifs_file_nobrl_ops; /* no brlocks */
+extern const struct file_operations cifs_file_direct_nobrl_ops;
+extern const struct file_operations cifs_file_strict_nobrl_ops;
extern int cifs_open(struct inode *inode, struct file *file);
extern int cifs_close(struct inode *inode, struct file *file);
extern int cifs_closedir(struct inode *inode, struct file *file);
extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
- size_t read_size, loff_t *poffset);
+ size_t read_size, loff_t *poffset);
+extern ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
size_t write_size, loff_t *poffset);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, int);
+extern int cifs_strict_fsync(struct file *, int);
extern int cifs_flush(struct file *, fl_owner_t id);
extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
+extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
extern const struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
@@ -118,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.68"
+#define CIFS_VERSION "1.69"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 606ca8b..5bfb753 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -161,6 +161,7 @@ struct TCP_Server_Info {
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+ enum statusEnum tcpStatus; /* what we think the status is */
char *hostname; /* hostname portion of UNC string */
struct socket *ssocket;
struct sockaddr_storage dstaddr;
@@ -168,25 +169,16 @@ struct TCP_Server_Info {
wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
struct list_head pending_mid_q;
- void *Server_NlsInfo; /* BB - placeholder for future NLS info */
- unsigned short server_codepage; /* codepage for the server */
- enum protocolEnum protocolType;
- char versionMajor;
- char versionMinor;
- bool svlocal:1; /* local server or remote */
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
bool tcp_nodelay;
atomic_t inFlight; /* number of requests on the wire to server */
-#ifdef CONFIG_CIFS_STATS2
- atomic_t inSend; /* requests trying to send */
- atomic_t num_waiters; /* blocked waiting to get in sendrecv */
-#endif
- enum statusEnum tcpStatus; /* what we think the status is */
struct mutex srv_mutex;
struct task_struct *tsk;
char server_GUID[16];
char secMode;
+ bool session_estab; /* mark when very first sess is established */
+ u16 dialect; /* dialect index that server chose */
enum securityEnum secType;
unsigned int maxReq; /* Clients should submit no more */
/* than maxReq distinct unanswered SMBs to the server when using */
@@ -199,8 +191,6 @@ struct TCP_Server_Info {
unsigned int max_vcs; /* maximum number of smb sessions, at least
those that can be specified uniquely with
vcnumbers */
- char sessid[4]; /* unique token id for this session */
- /* (returned on Negotiate */
int capabilities; /* allow selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
__u16 CurrentMid; /* multiplex id - rotating counter */
@@ -210,17 +200,20 @@ struct TCP_Server_Info {
__u32 sequence_number; /* for signing, protected by srv_mutex */
struct session_key session_key;
unsigned long lstrp; /* when we got last response from this server */
- u16 dialect; /* dialect index that server chose */
struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
/* extended security flavors that server supports */
+ bool sec_ntlmssp; /* supports NTLMSSP */
+ bool sec_kerberosu2u; /* supports U2U Kerberos */
bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */
- bool sec_kerberosu2u; /* supports U2U Kerberos */
- bool sec_ntlmssp; /* supports NTLMSSP */
- bool session_estab; /* mark when very first sess is established */
+ struct delayed_work echo; /* echo ping workqueue job */
#ifdef CONFIG_CIFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
+#ifdef CONFIG_CIFS_STATS2
+ atomic_t inSend; /* requests trying to send */
+ atomic_t num_waiters; /* blocked waiting to get in sendrecv */
+#endif
};
/*
@@ -446,11 +439,11 @@ struct cifsInodeInfo {
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
- unsigned long time; /* jiffies of last update/check of inode */
- bool clientCanCacheRead:1; /* read oplock */
- bool clientCanCacheAll:1; /* read and writebehind oplock */
- bool delete_pending:1; /* DELETE_ON_CLOSE is set */
- bool invalid_mapping:1; /* pagecache is invalid */
+ bool clientCanCacheRead; /* read oplock */
+ bool clientCanCacheAll; /* read and writebehind oplock */
+ bool delete_pending; /* DELETE_ON_CLOSE is set */
+ bool invalid_mapping; /* pagecache is invalid */
+ unsigned long time; /* jiffies of last update of inode */
u64 server_eof; /* current file size on server */
u64 uniqueid; /* server inode number */
u64 createtime; /* creation time on server */
@@ -508,6 +501,18 @@ static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon,
#endif
+struct mid_q_entry;
+
+/*
+ * This is the prototype for the mid callback function. When creating one,
+ * take special care to avoid deadlocks. Things to bear in mind:
+ *
+ * - it will be called by cifsd
+ * - the GlobalMid_Lock will be held
+ * - the mid will be removed from the pending_mid_q list
+ */
+typedef void (mid_callback_t)(struct mid_q_entry *mid);
+
/* one of these for every pending CIFS request to the server */
struct mid_q_entry {
struct list_head qhead; /* mids waiting on reply from this server */
@@ -519,7 +524,8 @@ struct mid_q_entry {
unsigned long when_sent; /* time when smb send finished */
unsigned long when_received; /* when demux complete (taken off wire) */
#endif
- struct task_struct *tsk; /* task waiting for response */
+ mid_callback_t *callback; /* call completion callback */
+ void *callback_data; /* general purpose pointer for callback */
struct smb_hdr *resp_buf; /* response buffer */
int midState; /* wish this were enum but can not pass to wait_event */
__u8 command; /* smb command code */
@@ -622,12 +628,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
#define CIFS_IOVEC 4 /* array of response buffers */
/* Type of Request to SendReceive2 */
-#define CIFS_STD_OP 0 /* normal request timeout */
-#define CIFS_LONG_OP 1 /* long op (up to 45 sec, oplock time) */
-#define CIFS_VLONG_OP 2 /* sloow op - can take up to 180 seconds */
-#define CIFS_BLOCKING_OP 4 /* operation can block */
-#define CIFS_ASYNC_OP 8 /* do not wait for response */
-#define CIFS_TIMEOUT_MASK 0x00F /* only one of 5 above set in req */
+#define CIFS_BLOCKING_OP 1 /* operation can block */
+#define CIFS_ASYNC_OP 2 /* do not wait for response */
+#define CIFS_TIMEOUT_MASK 0x003 /* only one of above set in req */
#define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */
#define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */
#define CIFS_NO_RESP 0x040 /* no response buffer required */
@@ -790,6 +793,9 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
+/* reconnect after this many failed echo attempts */
+GLOBAL_EXTERN unsigned short echo_retries;
+
void cifs_oplock_break(struct work_struct *work);
void cifs_oplock_break_get(struct cifsFileInfo *cfile);
void cifs_oplock_break_put(struct cifsFileInfo *cfile);
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index de36b09..b5c8cc5 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -23,6 +23,7 @@
#define _CIFSPDU_H
#include <net/sock.h>
+#include <asm/unaligned.h>
#include "smbfsctl.h"
#ifdef CONFIG_CIFS_WEAK_PW_HASH
@@ -50,6 +51,7 @@
#define SMB_COM_SETATTR 0x09 /* trivial response */
#define SMB_COM_LOCKING_ANDX 0x24 /* trivial response */
#define SMB_COM_COPY 0x29 /* trivial rsp, fail filename ignrd*/
+#define SMB_COM_ECHO 0x2B /* echo request */
#define SMB_COM_OPEN_ANDX 0x2D /* Legacy open for old servers */
#define SMB_COM_READ_ANDX 0x2E
#define SMB_COM_WRITE_ANDX 0x2F
@@ -425,11 +427,49 @@ struct smb_hdr {
__u16 Mid;
__u8 WordCount;
} __attribute__((packed));
-/* given a pointer to an smb_hdr retrieve the value of byte count */
-#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
-#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
+
+/* given a pointer to an smb_hdr retrieve a char pointer to the byte count */
+#define BCC(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + \
+ (2 * (smb_var)->WordCount))
+
/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
-#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2)
+#define pByteArea(smb_var) (BCC(smb_var) + 2)
+
+/* get the converted ByteCount for a SMB packet and return it */
+static inline __u16
+get_bcc(struct smb_hdr *hdr)
+{
+ __u16 *bc_ptr = (__u16 *)BCC(hdr);
+
+ return get_unaligned(bc_ptr);
+}
+
+/* get the unconverted ByteCount for a SMB packet and return it */
+static inline __u16
+get_bcc_le(struct smb_hdr *hdr)
+{
+ __le16 *bc_ptr = (__le16 *)BCC(hdr);
+
+ return get_unaligned_le16(bc_ptr);
+}
+
+/* set the ByteCount for a SMB packet in host-byte order */
+static inline void
+put_bcc(__u16 count, struct smb_hdr *hdr)
+{
+ __u16 *bc_ptr = (__u16 *)BCC(hdr);
+
+ put_unaligned(count, bc_ptr);
+}
+
+/* set the ByteCount for a SMB packet in little-endian */
+static inline void
+put_bcc_le(__u16 count, struct smb_hdr *hdr)
+{
+ __le16 *bc_ptr = (__le16 *)BCC(hdr);
+
+ put_unaligned_le16(count, bc_ptr);
+}
/*
* Computer Name Length (since Netbios name was length 16 with last byte 0x20)
@@ -760,6 +800,20 @@ typedef struct smb_com_tconx_rsp_ext {
*
*/
+typedef struct smb_com_echo_req {
+ struct smb_hdr hdr;
+ __le16 EchoCount;
+ __le16 ByteCount;
+ char Data[1];
+} __attribute__((packed)) ECHO_REQ;
+
+typedef struct smb_com_echo_rsp {
+ struct smb_hdr hdr;
+ __le16 SequenceNumber;
+ __le16 ByteCount;
+ char Data[1];
+} __attribute__((packed)) ECHO_RSP;
+
typedef struct smb_com_logoff_andx_req {
struct smb_hdr hdr; /* wct = 2 */
__u8 AndXCommand;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e6d1481..982895f 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -61,6 +61,12 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath, const struct dfs_info3_param *ref,
char **devname);
/* extern void renew_parental_timestamps(struct dentry *direntry);*/
+extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
+ struct TCP_Server_Info *server);
+extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
+extern int cifs_call_async(struct TCP_Server_Info *server,
+ struct smb_hdr *in_buf, mid_callback_t *callback,
+ void *cbdata);
extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
@@ -347,12 +353,13 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u16 netfid, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
- const bool waitFlag);
+ const bool waitFlag, const __u8 oplock_level);
extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const int get_flag,
const __u64 len, struct file_lock *,
const __u16 lock_type, const bool waitFlag);
extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBEcho(struct TCP_Server_Info *server);
extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
extern struct cifsSesInfo *sesInfoAlloc(void);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2f6795e..3106f5e 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -331,37 +331,35 @@ smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
static int validate_t2(struct smb_t2_rsp *pSMB)
{
- int rc = -EINVAL;
- int total_size;
- char *pBCC;
+ unsigned int total_size;
+
+ /* check for plausible wct */
+ if (pSMB->hdr.WordCount < 10)
+ goto vt2_err;
- /* check for plausible wct, bcc and t2 data and parm sizes */
/* check for parm and data offset going beyond end of smb */
- if (pSMB->hdr.WordCount >= 10) {
- if ((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
- (le16_to_cpu(pSMB->t2_rsp.DataOffset) <= 1024)) {
- /* check that bcc is at least as big as parms + data */
- /* check that bcc is less than negotiated smb buffer */
- total_size = le16_to_cpu(pSMB->t2_rsp.ParameterCount);
- if (total_size < 512) {
- total_size +=
- le16_to_cpu(pSMB->t2_rsp.DataCount);
- /* BCC le converted in SendReceive */
- pBCC = (pSMB->hdr.WordCount * 2) +
- sizeof(struct smb_hdr) +
- (char *)pSMB;
- if ((total_size <= (*(u16 *)pBCC)) &&
- (total_size <
- CIFSMaxBufSize+MAX_CIFS_HDR_SIZE)) {
- return 0;
- }
- }
- }
- }
+ if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 ||
+ get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024)
+ goto vt2_err;
+
+ /* check that bcc is at least as big as parms + data */
+ /* check that bcc is less than negotiated smb buffer */
+ total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount);
+ if (total_size >= 512)
+ goto vt2_err;
+
+ total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount);
+ if (total_size > get_bcc(&pSMB->hdr) ||
+ total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)
+ goto vt2_err;
+
+ return 0;
+vt2_err:
cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
sizeof(struct smb_t2_rsp) + 16);
- return rc;
+ return -EINVAL;
}
+
int
CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
{
@@ -452,7 +450,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
- GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
/* even though we do not use raw we might as well set this
accurately, in case we ever find a need for it */
if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
@@ -566,7 +563,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
- GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
server->capabilities = le32_to_cpu(pSMBr->Capabilities);
server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
server->timeAdj *= 60;
@@ -706,6 +702,53 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
return rc;
}
+/*
+ * This is a no-op for now. We're not really interested in the reply, but
+ * rather in the fact that the server sent one and that server->lstrp
+ * gets updated.
+ *
+ * FIXME: maybe we should consider checking that the reply matches request?
+ */
+static void
+cifs_echo_callback(struct mid_q_entry *mid)
+{
+ struct TCP_Server_Info *server = mid->callback_data;
+
+ DeleteMidQEntry(mid);
+ atomic_dec(&server->inFlight);
+ wake_up(&server->request_q);
+}
+
+int
+CIFSSMBEcho(struct TCP_Server_Info *server)
+{
+ ECHO_REQ *smb;
+ int rc = 0;
+
+ cFYI(1, "In echo request");
+
+ rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
+ if (rc)
+ return rc;
+
+ /* set up echo request */
+ smb->hdr.Tid = cpu_to_le16(0xffff);
+ smb->hdr.WordCount = 1;
+ put_unaligned_le16(1, &smb->EchoCount);
+ put_bcc_le(1, &smb->hdr);
+ smb->Data[0] = 'a';
+ smb->hdr.smb_buf_length += 3;
+
+ rc = cifs_call_async(server, (struct smb_hdr *)smb,
+ cifs_echo_callback, server);
+ if (rc)
+ cFYI(1, "Echo request failed: %d", rc);
+
+ cifs_small_buf_release(smb);
+
+ return rc;
+}
+
int
CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
{
@@ -1193,7 +1236,7 @@ OldOpenRetry:
pSMB->ByteCount = cpu_to_le16(count);
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+ (struct smb_hdr *)pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
@@ -1306,7 +1349,7 @@ openRetry:
pSMB->ByteCount = cpu_to_le16(count);
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+ (struct smb_hdr *)pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
@@ -1388,7 +1431,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
iov[0].iov_base = (char *)pSMB;
iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
- &resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR);
+ &resp_buf_type, CIFS_LOG_ERROR);
cifs_stats_inc(&tcon->num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
@@ -1663,7 +1706,8 @@ int
CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const __u64 len,
const __u64 offset, const __u32 numUnlock,
- const __u32 numLock, const __u8 lockType, const bool waitFlag)
+ const __u32 numLock, const __u8 lockType,
+ const bool waitFlag, const __u8 oplock_level)
{
int rc = 0;
LOCK_REQ *pSMB = NULL;
@@ -1691,6 +1735,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
pSMB->NumberOfLocks = cpu_to_le16(numLock);
pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
pSMB->LockType = lockType;
+ pSMB->OplockLevel = oplock_level;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = smb_file_id; /* netfid stays le */
@@ -3087,7 +3132,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
- CIFS_STD_OP);
+ 0);
cifs_stats_inc(&tcon->num_acl_get);
if (rc) {
cFYI(1, "Send error in QuerySecDesc = %d", rc);
@@ -5562,7 +5607,7 @@ QAllEAsRetry:
}
/* make sure list_len doesn't go past end of SMB */
- end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
+ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
if ((char *)ea_response_data + list_len > end_of_smb) {
cFYI(1, "EA list appears to go beyond SMB");
rc = -EIO;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9f59887..18d3c77 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -52,6 +52,9 @@
#define CIFS_PORT 445
#define RFC1001_PORT 139
+/* SMB echo "timeout" -- FIXME: tunable? */
+#define SMB_ECHO_INTERVAL (60 * HZ)
+
extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
unsigned char *p24);
@@ -152,6 +155,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
+ cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
@@ -163,7 +167,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
}
spin_unlock(&cifs_tcp_ses_lock);
+
/* do not want to be sending data on a socket we are freeing */
+ cFYI(1, "%s: tearing down socket", __func__);
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
@@ -180,22 +186,20 @@ cifs_reconnect(struct TCP_Server_Info *server)
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
+ server->lstrp = jiffies;
+ mutex_unlock(&server->srv_mutex);
+ /* mark submitted MIDs for retry and issue callback */
+ cFYI(1, "%s: issuing mid callbacks", __func__);
spin_lock(&GlobalMid_Lock);
- list_for_each(tmp, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct
- mid_q_entry,
- qhead);
- if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
- /* Mark other intransit requests as needing
- retry so we do not immediately mark the
- session bad again (ie after we reconnect
- below) as they timeout too */
+ list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ if (mid_entry->midState == MID_REQUEST_SUBMITTED)
mid_entry->midState = MID_RETRY_NEEDED;
- }
+ list_del_init(&mid_entry->qhead);
+ mid_entry->callback(mid_entry);
}
spin_unlock(&GlobalMid_Lock);
- mutex_unlock(&server->srv_mutex);
while ((server->tcpStatus != CifsExiting) &&
(server->tcpStatus != CifsGood)) {
@@ -212,10 +216,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsGood;
spin_unlock(&GlobalMid_Lock);
- /* atomic_set(&server->inFlight,0);*/
- wake_up(&server->response_q);
}
}
+
return rc;
}
@@ -229,9 +232,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
{
struct smb_t2_rsp *pSMBt;
- int total_data_size;
- int data_in_this_rsp;
int remaining;
+ __u16 total_data_size, data_in_this_rsp;
if (pSMB->Command != SMB_COM_TRANSACTION2)
return 0;
@@ -245,8 +247,8 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
pSMBt = (struct smb_t2_rsp *)pSMB;
- total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
- data_in_this_rsp = le16_to_cpu(pSMBt->t2_rsp.DataCount);
+ total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+ data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
remaining = total_data_size - data_in_this_rsp;
@@ -272,21 +274,18 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
{
struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB;
- int total_data_size;
- int total_in_buf;
- int remaining;
- int total_in_buf2;
char *data_area_of_target;
char *data_area_of_buf2;
- __u16 byte_count;
+ int remaining;
+ __u16 byte_count, total_data_size, total_in_buf, total_in_buf2;
- total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
+ total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
- if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
+ if (total_data_size !=
+ get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount))
cFYI(1, "total data size of primary and secondary t2 differ");
- }
- total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount);
+ total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
remaining = total_data_size - total_in_buf;
@@ -296,28 +295,28 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
if (remaining == 0) /* nothing to do, ignore */
return 0;
- total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount);
+ total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount);
if (remaining < total_in_buf2) {
cFYI(1, "transact2 2nd response contains too much data");
}
/* find end of first SMB data area */
data_area_of_target = (char *)&pSMBt->hdr.Protocol +
- le16_to_cpu(pSMBt->t2_rsp.DataOffset);
+ get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
/* validate target area */
- data_area_of_buf2 = (char *) &pSMB2->hdr.Protocol +
- le16_to_cpu(pSMB2->t2_rsp.DataOffset);
+ data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol +
+ get_unaligned_le16(&pSMB2->t2_rsp.DataOffset);
data_area_of_target += total_in_buf;
/* copy second buffer into end of first buffer */
memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
total_in_buf += total_in_buf2;
- pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf);
- byte_count = le16_to_cpu(BCC_LE(pTargetSMB));
+ put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
+ byte_count = get_bcc_le(pTargetSMB);
byte_count += total_in_buf2;
- BCC_LE(pTargetSMB) = cpu_to_le16(byte_count);
+ put_bcc_le(byte_count, pTargetSMB);
byte_count = pTargetSMB->smb_buf_length;
byte_count += total_in_buf2;
@@ -331,7 +330,26 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
return 0; /* we are done */
} else /* more responses to go */
return 1;
+}
+
+static void
+cifs_echo_request(struct work_struct *work)
+{
+ int rc;
+ struct TCP_Server_Info *server = container_of(work,
+ struct TCP_Server_Info, echo.work);
+
+ /* no need to ping if we got a response recently */
+ if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+ goto requeue_echo;
+ rc = CIFSSMBEcho(server);
+ if (rc)
+ cFYI(1, "Unable to send echo request to server: %s",
+ server->hostname);
+
+requeue_echo:
+ queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
}
static int
@@ -345,8 +363,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
struct msghdr smb_msg;
struct kvec iov;
struct socket *csocket = server->ssocket;
- struct list_head *tmp;
- struct cifsSesInfo *ses;
+ struct list_head *tmp, *tmp2;
struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
char temp;
@@ -399,7 +416,20 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
pdu_length = 4; /* enough to get RFC1001 header */
+
incomplete_rcv:
+ if (echo_retries > 0 &&
+ time_after(jiffies, server->lstrp +
+ (echo_retries * SMB_ECHO_INTERVAL))) {
+ cERROR(1, "Server %s has not responded in %d seconds. "
+ "Reconnecting...", server->hostname,
+ (echo_retries * SMB_ECHO_INTERVAL / HZ));
+ cifs_reconnect(server);
+ csocket = server->ssocket;
+ wake_up(&server->response_q);
+ continue;
+ }
+
length =
kernel_recvmsg(csocket, &smb_msg,
&iov, 1, pdu_length, 0 /* BB other flags? */);
@@ -559,10 +589,11 @@ incomplete_rcv:
continue;
}
+ mid_entry = NULL;
+ server->lstrp = jiffies;
- task_to_wake = NULL;
spin_lock(&GlobalMid_Lock);
- list_for_each(tmp, &server->pending_mid_q) {
+ list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if ((mid_entry->mid == smb_buffer->Mid) &&
@@ -603,20 +634,19 @@ incomplete_rcv:
mid_entry->resp_buf = smb_buffer;
mid_entry->largeBuf = isLargeBuf;
multi_t2_fnd:
- task_to_wake = mid_entry->tsk;
mid_entry->midState = MID_RESPONSE_RECEIVED;
+ list_del_init(&mid_entry->qhead);
+ mid_entry->callback(mid_entry);
#ifdef CONFIG_CIFS_STATS2
mid_entry->when_received = jiffies;
#endif
- /* so we do not time out requests to server
- which is still responding (since server could
- be busy but not dead) */
- server->lstrp = jiffies;
break;
}
+ mid_entry = NULL;
}
spin_unlock(&GlobalMid_Lock);
- if (task_to_wake) {
+
+ if (mid_entry != NULL) {
/* Was previous buf put in mpx struct for multi-rsp? */
if (!isMultiRsp) {
/* smb buffer will be freed by user thread */
@@ -625,11 +655,10 @@ multi_t2_fnd:
else
smallbuf = NULL;
}
- wake_up_process(task_to_wake);
} else if (!is_valid_oplock_break(smb_buffer, server) &&
!isMultiRsp) {
cERROR(1, "No task to wake, unknown frame received! "
- "NumMids %d", midCount.counter);
+ "NumMids %d", atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
@@ -677,44 +706,16 @@ multi_t2_fnd:
if (smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(smallbuf);
- /*
- * BB: we shouldn't have to do any of this. It shouldn't be
- * possible to exit from the thread with active SMB sessions
- */
- spin_lock(&cifs_tcp_ses_lock);
- if (list_empty(&server->pending_mid_q)) {
- /* loop through server session structures attached to this and
- mark them dead */
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo,
- smb_ses_list);
- ses->status = CifsExiting;
- ses->server = NULL;
- }
- spin_unlock(&cifs_tcp_ses_lock);
- } else {
- /* although we can not zero the server struct pointer yet,
- since there are active requests which may depnd on them,
- mark the corresponding SMB sessions as exiting too */
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo,
- smb_ses_list);
- ses->status = CifsExiting;
- }
-
+ if (!list_empty(&server->pending_mid_q)) {
spin_lock(&GlobalMid_Lock);
- list_for_each(tmp, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
- cFYI(1, "Clearing Mid 0x%x - waking up ",
+ list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ cFYI(1, "Clearing Mid 0x%x - issuing callback",
mid_entry->mid);
- task_to_wake = mid_entry->tsk;
- if (task_to_wake)
- wake_up_process(task_to_wake);
- }
+ list_del_init(&mid_entry->qhead);
+ mid_entry->callback(mid_entry);
}
spin_unlock(&GlobalMid_Lock);
- spin_unlock(&cifs_tcp_ses_lock);
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
}
@@ -732,18 +733,6 @@ multi_t2_fnd:
coming home not much else we can do but free the memory */
}
- /* last chance to mark ses pointers invalid
- if there are any pointing to this (e.g
- if a crazy root user tried to kill cifsd
- kernel thread explicitly this might happen) */
- /* BB: This shouldn't be necessary, see above */
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
- ses->server = NULL;
- }
- spin_unlock(&cifs_tcp_ses_lock);
-
kfree(server->hostname);
task_to_wake = xchg(&server->tsk, NULL);
kfree(server);
@@ -1612,6 +1601,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ cancel_delayed_work_sync(&server->echo);
+
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
@@ -1701,8 +1692,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
+ tcp_ses->lstrp = jiffies;
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+ INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
/*
* at this point we are the only ones with the pointer
@@ -1751,6 +1744,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
cifs_fscache_get_client_cookie(tcp_ses);
+ /* queue echo request delayed work */
+ queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
+
return tcp_ses;
out_err_crypto_release:
@@ -2936,8 +2932,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
TCONX_RSP *pSMBr;
unsigned char *bcc_ptr;
int rc = 0;
- int length, bytes_left;
- __u16 count;
+ int length;
+ __u16 bytes_left, count;
if (ses == NULL)
return -EIO;
@@ -2965,7 +2961,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++; /* skip password */
/* already aligned so no need to do it below */
} else {
- pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
/* BB FIXME add code to fail this if NTLMv2 or Kerberos
specified as required (when that support is added to
the vfs in the future) as only NTLM or the much
@@ -2983,7 +2979,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
#endif /* CIFS_WEAK_PW_HASH */
SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr);
- bcc_ptr += CIFS_SESS_KEY_SIZE;
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
if (ses->capabilities & CAP_UNICODE) {
/* must align unicode strings */
*bcc_ptr = 0; /* null byte password */
@@ -3021,7 +3017,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
- CIFS_STD_OP);
+ 0);
/* above now done in SendReceive */
if ((rc == 0) && (tcon != NULL)) {
@@ -3031,7 +3027,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
tcon->need_reconnect = false;
tcon->tid = smb_buffer_response->Tid;
bcc_ptr = pByteArea(smb_buffer_response);
- bytes_left = BCC(smb_buffer_response);
+ bytes_left = get_bcc(smb_buffer_response);
length = strnlen(bcc_ptr, bytes_left - 2);
if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
is_unicode = true;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d843631..d7d65a7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -287,6 +287,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
struct inode *inode = cifs_file->dentry->d_inode;
struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsLockInfo *li, *tmp;
spin_lock(&cifs_file_list_lock);
@@ -302,6 +303,13 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
if (list_empty(&cifsi->openFileList)) {
cFYI(1, "closing last open instance for inode %p",
cifs_file->dentry->d_inode);
+
+ /* in strict cache mode we need invalidate mapping on the last
+ close because it may cause a error when we open this file
+ again and get at least level II oplock */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+ CIFS_I(inode)->invalid_mapping = true;
+
cifs_set_oplock_level(cifsi, 0);
}
spin_unlock(&cifs_file_list_lock);
@@ -726,12 +734,12 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
/* BB we could chain these into one lock request BB */
rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
- 0, 1, lockType, 0 /* wait flag */ );
+ 0, 1, lockType, 0 /* wait flag */, 0);
if (rc == 0) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
pfLock->fl_start, 1 /* numUnlock */ ,
0 /* numLock */ , lockType,
- 0 /* wait flag */ );
+ 0 /* wait flag */, 0);
pfLock->fl_type = F_UNLCK;
if (rc != 0)
cERROR(1, "Error unlocking previously locked "
@@ -748,13 +756,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
rc = CIFSSMBLock(xid, tcon, netfid, length,
pfLock->fl_start, 0, 1,
lockType | LOCKING_ANDX_SHARED_LOCK,
- 0 /* wait flag */);
+ 0 /* wait flag */, 0);
if (rc == 0) {
rc = CIFSSMBLock(xid, tcon, netfid,
length, pfLock->fl_start, 1, 0,
lockType |
LOCKING_ANDX_SHARED_LOCK,
- 0 /* wait flag */);
+ 0 /* wait flag */, 0);
pfLock->fl_type = F_RDLCK;
if (rc != 0)
cERROR(1, "Error unlocking "
@@ -797,8 +805,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
if (numLock) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
- pfLock->fl_start,
- 0, numLock, lockType, wait_flag);
+ pfLock->fl_start, 0, numLock, lockType,
+ wait_flag, 0);
if (rc == 0) {
/* For Windows locks we must store them. */
@@ -818,9 +826,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
(pfLock->fl_start + length) >=
(li->offset + li->length)) {
stored_rc = CIFSSMBLock(xid, tcon,
- netfid,
- li->length, li->offset,
- 1, 0, li->type, false);
+ netfid, li->length,
+ li->offset, 1, 0,
+ li->type, false, 0);
if (stored_rc)
rc = stored_rc;
else {
@@ -839,29 +847,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
return rc;
}
-/*
- * Set the timeout on write requests past EOF. For some servers (Windows)
- * these calls can be very long.
- *
- * If we're writing >10M past the EOF we give a 180s timeout. Anything less
- * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
- * The 10M cutoff is totally arbitrary. A better scheme for this would be
- * welcome if someone wants to suggest one.
- *
- * We may be able to do a better job with this if there were some way to
- * declare that a file should be sparse.
- */
-static int
-cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
-{
- if (offset <= cifsi->server_eof)
- return CIFS_STD_OP;
- else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
- return CIFS_VLONG_OP;
- else
- return CIFS_LONG_OP;
-}
-
/* update the file size (if needed) after a write */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
@@ -882,7 +867,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
unsigned int total_written;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- int xid, long_op;
+ int xid;
struct cifsFileInfo *open_file;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
@@ -903,7 +888,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
xid = GetXid();
- long_op = cifs_write_timeout(cifsi, *poffset);
for (total_written = 0; write_size > total_written;
total_written += bytes_written) {
rc = -EAGAIN;
@@ -931,7 +915,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
min_t(const int, cifs_sb->wsize,
write_size - total_written),
*poffset, &bytes_written,
- NULL, write_data + total_written, long_op);
+ NULL, write_data + total_written, 0);
}
if (rc || (bytes_written == 0)) {
if (total_written)
@@ -944,8 +928,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
cifs_update_eof(cifsi, *poffset, bytes_written);
*poffset += bytes_written;
}
- long_op = CIFS_STD_OP; /* subsequent writes fast -
- 15 seconds is plenty */
}
cifs_stats_bytes_written(pTcon, total_written);
@@ -974,7 +956,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
unsigned int total_written;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- int xid, long_op;
+ int xid;
struct dentry *dentry = open_file->dentry;
struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
@@ -987,7 +969,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
xid = GetXid();
- long_op = cifs_write_timeout(cifsi, *poffset);
for (total_written = 0; write_size > total_written;
total_written += bytes_written) {
rc = -EAGAIN;
@@ -1017,7 +998,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
rc = CIFSSMBWrite2(xid, pTcon,
open_file->netfid, len,
*poffset, &bytes_written,
- iov, 1, long_op);
+ iov, 1, 0);
} else
rc = CIFSSMBWrite(xid, pTcon,
open_file->netfid,
@@ -1025,7 +1006,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
write_size - total_written),
*poffset, &bytes_written,
write_data + total_written,
- NULL, long_op);
+ NULL, 0);
}
if (rc || (bytes_written == 0)) {
if (total_written)
@@ -1038,8 +1019,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
cifs_update_eof(cifsi, *poffset, bytes_written);
*poffset += bytes_written;
}
- long_op = CIFS_STD_OP; /* subsequent writes fast -
- 15 seconds is plenty */
}
cifs_stats_bytes_written(pTcon, total_written);
@@ -1239,7 +1218,7 @@ static int cifs_writepages(struct address_space *mapping,
struct pagevec pvec;
int rc = 0;
int scanned = 0;
- int xid, long_op;
+ int xid;
cifs_sb = CIFS_SB(mapping->host->i_sb);
@@ -1377,43 +1356,67 @@ retry:
break;
}
if (n_iov) {
+retry_write:
open_file = find_writable_file(CIFS_I(mapping->host),
false);
if (!open_file) {
cERROR(1, "No writable handles for inode");
rc = -EBADF;
} else {
- long_op = cifs_write_timeout(cifsi, offset);
rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
bytes_to_write, offset,
&bytes_written, iov, n_iov,
- long_op);
+ 0);
cifsFileInfo_put(open_file);
- cifs_update_eof(cifsi, offset, bytes_written);
}
- if (rc || bytes_written < bytes_to_write) {
- cERROR(1, "Write2 ret %d, wrote %d",
- rc, bytes_written);
- mapping_set_error(mapping, rc);
- } else {
+ cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
+
+ /*
+ * For now, treat a short write as if nothing got
+ * written. A zero length write however indicates
+ * ENOSPC or EFBIG. We have no way to know which
+ * though, so call it ENOSPC for now. EFBIG would
+ * get translated to AS_EIO anyway.
+ *
+ * FIXME: make it take into account the data that did
+ * get written
+ */
+ if (rc == 0) {
+ if (bytes_written == 0)
+ rc = -ENOSPC;
+ else if (bytes_written < bytes_to_write)
+ rc = -EAGAIN;
+ }
+
+ /* retry on data-integrity flush */
+ if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
+ goto retry_write;
+
+ /* fix the stats and EOF */
+ if (bytes_written > 0) {
cifs_stats_bytes_written(tcon, bytes_written);
+ cifs_update_eof(cifsi, offset, bytes_written);
}
for (i = 0; i < n_iov; i++) {
page = pvec.pages[first + i];
- /* Should we also set page error on
- success rc but too little data written? */
- /* BB investigate retry logic on temporary
- server crash cases and how recovery works
- when page marked as error */
- if (rc)
+ /* on retryable write error, redirty page */
+ if (rc == -EAGAIN)
+ redirty_page_for_writepage(wbc, page);
+ else if (rc != 0)
SetPageError(page);
kunmap(page);
unlock_page(page);
end_page_writeback(page);
page_cache_release(page);
}
+
+ if (rc != -EAGAIN)
+ mapping_set_error(mapping, rc);
+ else
+ rc = 0;
+
if ((wbc->nr_to_write -= n_iov) <= 0)
done = 1;
index = next;
@@ -1525,27 +1528,47 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
return rc;
}
-int cifs_fsync(struct file *file, int datasync)
+int cifs_strict_fsync(struct file *file, int datasync)
{
int xid;
int rc = 0;
struct cifsTconInfo *tcon;
struct cifsFileInfo *smbfile = file->private_data;
struct inode *inode = file->f_path.dentry->d_inode;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
xid = GetXid();
cFYI(1, "Sync file - name: %s datasync: 0x%x",
file->f_path.dentry->d_name.name, datasync);
- rc = filemap_write_and_wait(inode->i_mapping);
- if (rc == 0) {
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ if (!CIFS_I(inode)->clientCanCacheRead)
+ cifs_invalidate_mapping(inode);
- tcon = tlink_tcon(smbfile->tlink);
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
- rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
- }
+ tcon = tlink_tcon(smbfile->tlink);
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+ rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
+
+ FreeXid(xid);
+ return rc;
+}
+
+int cifs_fsync(struct file *file, int datasync)
+{
+ int xid;
+ int rc = 0;
+ struct cifsTconInfo *tcon;
+ struct cifsFileInfo *smbfile = file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+
+ xid = GetXid();
+
+ cFYI(1, "Sync file - name: %s datasync: 0x%x",
+ file->f_path.dentry->d_name.name, datasync);
+
+ tcon = tlink_tcon(smbfile->tlink);
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+ rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
FreeXid(xid);
return rc;
@@ -1596,42 +1619,42 @@ int cifs_flush(struct file *file, fl_owner_t id)
return rc;
}
-ssize_t cifs_user_read(struct file *file, char __user *read_data,
- size_t read_size, loff_t *poffset)
+static ssize_t
+cifs_iovec_read(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *poffset)
{
- int rc = -EACCES;
- unsigned int bytes_read = 0;
- unsigned int total_read = 0;
- unsigned int current_read_size;
+ int rc;
+ int xid;
+ unsigned int total_read, bytes_read = 0;
+ size_t len, cur_len;
+ int iov_offset = 0;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- int xid;
struct cifsFileInfo *open_file;
- char *smb_read_data;
- char __user *current_offset;
struct smb_com_read_rsp *pSMBr;
+ char *read_data;
+
+ if (!nr_segs)
+ return 0;
+
+ len = iov_length(iov, nr_segs);
+ if (!len)
+ return 0;
xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- if (file->private_data == NULL) {
- rc = -EBADF;
- FreeXid(xid);
- return rc;
- }
open_file = file->private_data;
pTcon = tlink_tcon(open_file->tlink);
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
- for (total_read = 0, current_offset = read_data;
- read_size > total_read;
- total_read += bytes_read, current_offset += bytes_read) {
- current_read_size = min_t(const int, read_size - total_read,
- cifs_sb->rsize);
+ for (total_read = 0; total_read < len; total_read += bytes_read) {
+ cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
rc = -EAGAIN;
- smb_read_data = NULL;
+ read_data = NULL;
+
while (rc == -EAGAIN) {
int buf_type = CIFS_NO_BUFFER;
if (open_file->invalidHandle) {
@@ -1639,27 +1662,25 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
if (rc != 0)
break;
}
- rc = CIFSSMBRead(xid, pTcon,
- open_file->netfid,
- current_read_size, *poffset,
- &bytes_read, &smb_read_data,
- &buf_type);
- pSMBr = (struct smb_com_read_rsp *)smb_read_data;
- if (smb_read_data) {
- if (copy_to_user(current_offset,
- smb_read_data +
- 4 /* RFC1001 length field */ +
- le16_to_cpu(pSMBr->DataOffset),
- bytes_read))
+ rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
+ cur_len, *poffset, &bytes_read,
+ &read_data, &buf_type);
+ pSMBr = (struct smb_com_read_rsp *)read_data;
+ if (read_data) {
+ char *data_offset = read_data + 4 +
+ le16_to_cpu(pSMBr->DataOffset);
+ if (memcpy_toiovecend(iov, data_offset,
+ iov_offset, bytes_read))
rc = -EFAULT;
-
if (buf_type == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(smb_read_data);
+ cifs_small_buf_release(read_data);
else if (buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(smb_read_data);
- smb_read_data = NULL;
+ cifs_buf_release(read_data);
+ read_data = NULL;
+ iov_offset += bytes_read;
}
}
+
if (rc || (bytes_read == 0)) {
if (total_read) {
break;
@@ -1672,13 +1693,57 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
*poffset += bytes_read;
}
}
+
FreeXid(xid);
return total_read;
}
+ssize_t cifs_user_read(struct file *file, char __user *read_data,
+ size_t read_size, loff_t *poffset)
+{
+ struct iovec iov;
+ iov.iov_base = read_data;
+ iov.iov_len = read_size;
+
+ return cifs_iovec_read(file, &iov, 1, poffset);
+}
+
+static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ ssize_t read;
+
+ read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
+ if (read > 0)
+ iocb->ki_pos = pos;
+
+ return read;
+}
+
+ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct inode *inode;
+
+ inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+ if (CIFS_I(inode)->clientCanCacheRead)
+ return generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+ /*
+ * In strict cache mode we need to read from the server all the time
+ * if we don't have level II oplock because the server can delay mtime
+ * change - so we can't make a decision about inode invalidating.
+ * And we can also fail with pagereading if there are mandatory locks
+ * on pages affected by this read but not on the region from pos to
+ * pos+len-1.
+ */
+
+ return cifs_user_readv(iocb, iov, nr_segs, pos);
+}
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
- loff_t *poffset)
+ loff_t *poffset)
{
int rc = -EACCES;
unsigned int bytes_read = 0;
@@ -1746,6 +1811,21 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
return total_read;
}
+int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int rc, xid;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
+ xid = GetXid();
+
+ if (!CIFS_I(inode)->clientCanCacheRead)
+ cifs_invalidate_mapping(inode);
+
+ rc = generic_file_mmap(file, vma);
+ FreeXid(xid);
+ return rc;
+}
+
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
int rc, xid;
@@ -2192,7 +2272,8 @@ void cifs_oplock_break(struct work_struct *work)
*/
if (!cfile->oplock_break_cancelled) {
rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
- 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
+ 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
+ cinode->clientCanCacheRead ? 1 : 0);
cFYI(1, "Oplock release rc = %d", rc);
}
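
cifs_user_read() is now a thin wrapper that packs the caller's buffer into a one-element iovec and hands it to cifs_iovec_read(), so the plain and vectored read paths share one implementation, and cifs_strict_readv() only falls back to the cached generic_file_aio_read() when a read oplock is held. The same single-buffer-as-iovec wrapping is visible at the system-call level with readv(2); a small sketch, using /etc/hostname only as a stand-in for any readable file:

#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	struct iovec iov;
	ssize_t n;
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Express a plain buffer read as a one-element vectored read, the
	 * way cifs_user_read() now feeds cifs_iovec_read(). */
	iov.iov_base = buf;
	iov.iov_len = sizeof(buf) - 1;

	n = readv(fd, &iov, 1);
	if (n >= 0) {
		buf[n] = '\0';
		printf("read %zd bytes: %s", n, buf);
	}
	close(fd);
	return 0;
}
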
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6c9ee80..8852470 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -44,13 +44,17 @@ static void cifs_set_ops(struct inode *inode)
inode->i_fop = &cifs_file_direct_nobrl_ops;
else
inode->i_fop = &cifs_file_direct_ops;
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ inode->i_fop = &cifs_file_strict_nobrl_ops;
+ else
+ inode->i_fop = &cifs_file_strict_ops;
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
inode->i_fop = &cifs_file_nobrl_ops;
else { /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
}
-
/* check if server can support readpages */
if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
@@ -1679,7 +1683,7 @@ cifs_inode_needs_reval(struct inode *inode)
/*
* Zap the cache. Called when invalid_mapping flag is set.
*/
-static void
+void
cifs_invalidate_mapping(struct inode *inode)
{
int rc;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 43f1028..a09e077 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -571,7 +571,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
pCifsInode = CIFS_I(netfile->dentry->d_inode);
cifs_set_oplock_level(pCifsInode,
- pSMB->OplockLevel);
+ pSMB->OplockLevel ? OPLOCK_READ : 0);
/*
* cifs_oplock_break_put() can't be called
* from here. Get reference after queueing
@@ -637,77 +637,6 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
return;
}
-/* Convert 16 bit Unicode pathname to wire format from string in current code
- page. Conversion may involve remapping up the seven characters that are
- only legal in POSIX-like OS (if they are present in the string). Path
- names are little endian 16 bit Unicode on the wire */
-int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
- const struct nls_table *cp, int mapChars)
-{
- int i, j, charlen;
- int len_remaining = maxlen;
- char src_char;
- __u16 temp;
-
- if (!mapChars)
- return cifs_strtoUCS(target, source, PATH_MAX, cp);
-
- for (i = 0, j = 0; i < maxlen; j++) {
- src_char = source[i];
- switch (src_char) {
- case 0:
- target[j] = 0;
- goto ctoUCS_out;
- case ':':
- target[j] = cpu_to_le16(UNI_COLON);
- break;
- case '*':
- target[j] = cpu_to_le16(UNI_ASTERIK);
- break;
- case '?':
- target[j] = cpu_to_le16(UNI_QUESTION);
- break;
- case '<':
- target[j] = cpu_to_le16(UNI_LESSTHAN);
- break;
- case '>':
- target[j] = cpu_to_le16(UNI_GRTRTHAN);
- break;
- case '|':
- target[j] = cpu_to_le16(UNI_PIPE);
- break;
- /* BB We can not handle remapping slash until
- all the calls to build_path_from_dentry
- are modified, as they use slash as separator BB */
- /* case '\\':
- target[j] = cpu_to_le16(UNI_SLASH);
- break;*/
- default:
- charlen = cp->char2uni(source+i,
- len_remaining, &temp);
- /* if no match, use question mark, which
- at least in some cases servers as wild card */
- if (charlen < 1) {
- target[j] = cpu_to_le16(0x003f);
- charlen = 1;
- } else
- target[j] = cpu_to_le16(temp);
- len_remaining -= charlen;
- /* character may take more than one byte in the
- the source string, but will take exactly two
- bytes in the target string */
- i += charlen;
- continue;
- }
- i++; /* move to next char in source string */
- len_remaining--;
- }
-
-ctoUCS_out:
- return i;
-}
-
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
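
The removed cifsConvertToUCS() (now supplied from fs/cifs/cifs_unicode.c, per the diffstat above) remaps the characters that are legal in POSIX file names but reserved on the wire (: * ? < > | and, once the path-building code allows it, \) to substitute Unicode code points, falling back to '?' (0x003f) when the local code page cannot map a character. A standalone sketch of that remapping idea; the 0xF000-offset substitute values and the remap_reserved() name are assumptions for illustration, not taken from the patch:

#include <stdio.h>

/* Assumed substitute code points (reserved char + 0xF000), chosen here only
 * to illustrate the technique; the real values live in cifs_unicode.h. */
static unsigned short remap_reserved(char c)
{
	switch (c) {
	case ':': case '*': case '?': case '<': case '>': case '|':
		return 0xF000 + (unsigned char)c;
	default:
		return 0;	/* not a reserved character */
	}
}

int main(void)
{
	const char *name = "a:b*c";
	unsigned short out[16];
	int i;

	for (i = 0; name[i] && i < 15; i++) {
		unsigned short sub = remap_reserved(name[i]);
		/* an unmappable source character would fall back to 0x003f */
		out[i] = sub ? sub : (unsigned char)name[i];
	}
	out[i] = 0;

	for (i = 0; out[i]; i++)
		printf("%04x ", out[i]);
	printf("\n");
	return 0;
}
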
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 6783ce6..8d9189f 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -916,14 +916,14 @@ unsigned int
smbCalcSize(struct smb_hdr *ptr)
{
return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
- 2 /* size of the bcc field */ + BCC(ptr));
+ 2 /* size of the bcc field */ + get_bcc(ptr));
}
unsigned int
smbCalcSize_LE(struct smb_hdr *ptr)
{
return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
- 2 /* size of the bcc field */ + le16_to_cpu(BCC_LE(ptr)));
+ 2 /* size of the bcc field */ + get_bcc_le(ptr));
}
/* The following are taken from fs/ntfs/util.c */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index eb74648..1adc962 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -277,7 +277,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
}
static void
-decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
const struct nls_table *nls_cp)
{
int len;
@@ -323,7 +323,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
return;
}
-static int decode_ascii_ssetup(char **pbcc_area, int bleft,
+static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
struct cifsSesInfo *ses,
const struct nls_table *nls_cp)
{
@@ -575,12 +575,11 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
char *str_area;
SESSION_SETUP_ANDX *pSMB;
__u32 capabilities;
- int count;
+ __u16 count;
int resp_buf_type;
struct kvec iov[3];
enum securityEnum type;
- __u16 action;
- int bytes_remaining;
+ __u16 action, bytes_remaining;
struct key *spnego_key = NULL;
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
u16 blob_len;
@@ -876,10 +875,10 @@ ssetup_ntlmssp_authenticate:
count = iov[1].iov_len + iov[2].iov_len;
smb_buf->smb_buf_length += count;
- BCC_LE(smb_buf) = cpu_to_le16(count);
+ put_bcc_le(count, smb_buf);
rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type,
- CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
+ CIFS_LOG_ERROR);
/* SMB request buf freed in SendReceive2 */
pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
@@ -910,7 +909,7 @@ ssetup_ntlmssp_authenticate:
cFYI(1, "UID = %d ", ses->Suid);
/* response can have either 3 or 4 word count - Samba sends 3 */
/* and lanman response is 3 */
- bytes_remaining = BCC(smb_buf);
+ bytes_remaining = get_bcc(smb_buf);
bcc_ptr = pByteArea(smb_buf);
if (smb_buf->WordCount == 4) {
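
The session-setup and netmisc changes replace the open-coded BCC()/BCC_LE() accesses with get_bcc()/get_bcc_le()/put_bcc_le() helpers and carry the byte count as an unsigned __u16, matching the on-the-wire field. The helpers rely on the byte count sitting directly after the WordCount 16-bit parameter words; a self-contained sketch of that layout (not the kernel's exact definition):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the accessor the calls above rely on: in an SMB frame the 16-bit
 * little-endian byte count follows WordCount 16-bit parameter words, and
 * WordCount is assumed here to be the last byte of the fixed header.
 */
static uint16_t get_bcc_le_sketch(const uint8_t *smb, size_t hdr_len)
{
	uint8_t word_count = smb[hdr_len - 1];
	const uint8_t *bcc = smb + hdr_len + 2 * word_count;

	return (uint16_t)(bcc[0] | (bcc[1] << 8));
}

int main(void)
{
	/* Fake 4-byte "header" ending in WordCount = 2, two parameter words,
	 * then a byte count of 5 in little-endian order. */
	const uint8_t frame[] = { 0x00, 0x00, 0x00, 0x02,
				  0x11, 0x11, 0x22, 0x22,
				  0x05, 0x00 };

	printf("bcc = %u\n", get_bcc_le_sketch(frame, 4));
	return 0;
}
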
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 59ca81b..c1ccca1 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -36,7 +36,13 @@
extern mempool_t *cifs_mid_poolp;
-static struct mid_q_entry *
+static void
+wake_up_task(struct mid_q_entry *mid)
+{
+ wake_up_process(mid->callback_data);
+}
+
+struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
struct mid_q_entry *temp;
@@ -58,28 +64,28 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
/* when mid allocated can be before when sent */
temp->when_alloc = jiffies;
- temp->tsk = current;
+
+ /*
+ * The default is for the mid to be synchronous, so the
+ * default callback just wakes up the current task.
+ */
+ temp->callback = wake_up_task;
+ temp->callback_data = current;
}
- spin_lock(&GlobalMid_Lock);
- list_add_tail(&temp->qhead, &server->pending_mid_q);
atomic_inc(&midCount);
temp->midState = MID_REQUEST_ALLOCATED;
- spin_unlock(&GlobalMid_Lock);
return temp;
}
-static void
+void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
unsigned long now;
#endif
- spin_lock(&GlobalMid_Lock);
midEntry->midState = MID_FREE;
- list_del(&midEntry->qhead);
atomic_dec(&midCount);
- spin_unlock(&GlobalMid_Lock);
if (midEntry->largeBuf)
cifs_buf_release(midEntry->resp_buf);
else
@@ -103,6 +109,16 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
mempool_free(midEntry, cifs_mid_poolp);
}
+static void
+delete_mid(struct mid_q_entry *mid)
+{
+ spin_lock(&GlobalMid_Lock);
+ list_del(&mid->qhead);
+ spin_unlock(&GlobalMid_Lock);
+
+ DeleteMidQEntry(mid);
+}
+
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
@@ -244,31 +260,31 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
return smb_sendv(server, &iov, 1);
}
-static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
+static int wait_for_free_request(struct TCP_Server_Info *server,
+ const int long_op)
{
if (long_op == CIFS_ASYNC_OP) {
/* oplock breaks must not be held up */
- atomic_inc(&ses->server->inFlight);
+ atomic_inc(&server->inFlight);
return 0;
}
spin_lock(&GlobalMid_Lock);
while (1) {
- if (atomic_read(&ses->server->inFlight) >=
- cifs_max_pending){
+ if (atomic_read(&server->inFlight) >= cifs_max_pending) {
spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
- atomic_inc(&ses->server->num_waiters);
+ atomic_inc(&server->num_waiters);
#endif
- wait_event(ses->server->request_q,
- atomic_read(&ses->server->inFlight)
+ wait_event(server->request_q,
+ atomic_read(&server->inFlight)
< cifs_max_pending);
#ifdef CONFIG_CIFS_STATS2
- atomic_dec(&ses->server->num_waiters);
+ atomic_dec(&server->num_waiters);
#endif
spin_lock(&GlobalMid_Lock);
} else {
- if (ses->server->tcpStatus == CifsExiting) {
+ if (server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
return -ENOENT;
}
@@ -278,7 +294,7 @@ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
/* update # of requests on the wire to server */
if (long_op != CIFS_BLOCKING_OP)
- atomic_inc(&ses->server->inFlight);
+ atomic_inc(&server->inFlight);
spin_unlock(&GlobalMid_Lock);
break;
}
@@ -308,53 +324,81 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
*ppmidQ = AllocMidQEntry(in_buf, ses->server);
if (*ppmidQ == NULL)
return -ENOMEM;
+ spin_lock(&GlobalMid_Lock);
+ list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
+ spin_unlock(&GlobalMid_Lock);
return 0;
}
-static int wait_for_response(struct cifsSesInfo *ses,
- struct mid_q_entry *midQ,
- unsigned long timeout,
- unsigned long time_to_wait)
+static int
+wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
- unsigned long curr_timeout;
+ int error;
- for (;;) {
- curr_timeout = timeout + jiffies;
- wait_event_timeout(ses->server->response_q,
- midQ->midState != MID_REQUEST_SUBMITTED, timeout);
+ error = wait_event_killable(server->response_q,
+ midQ->midState != MID_REQUEST_SUBMITTED);
+ if (error < 0)
+ return -ERESTARTSYS;
- if (time_after(jiffies, curr_timeout) &&
- (midQ->midState == MID_REQUEST_SUBMITTED) &&
- ((ses->server->tcpStatus == CifsGood) ||
- (ses->server->tcpStatus == CifsNew))) {
+ return 0;
+}
- unsigned long lrt;
- /* We timed out. Is the server still
- sending replies ? */
- spin_lock(&GlobalMid_Lock);
- lrt = ses->server->lstrp;
- spin_unlock(&GlobalMid_Lock);
+/*
+ * Send a SMB request and set the callback function in the mid to handle
+ * the result. Caller is responsible for dealing with timeouts.
+ */
+int
+cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+ mid_callback_t *callback, void *cbdata)
+{
+ int rc;
+ struct mid_q_entry *mid;
- /* Calculate time_to_wait past last receive time.
- Although we prefer not to time out if the
- server is still responding - we will time
- out if the server takes more than 15 (or 45
- or 180) seconds to respond to this request
- and has not responded to any request from
- other threads on the client within 10 seconds */
- lrt += time_to_wait;
- if (time_after(jiffies, lrt)) {
- /* No replies for time_to_wait. */
- cERROR(1, "server not responding");
- return -1;
- }
- } else {
- return 0;
- }
+ rc = wait_for_free_request(server, CIFS_ASYNC_OP);
+ if (rc)
+ return rc;
+
+ mutex_lock(&server->srv_mutex);
+ mid = AllocMidQEntry(in_buf, server);
+ if (mid == NULL) {
+ mutex_unlock(&server->srv_mutex);
+ return -ENOMEM;
}
-}
+ /* put it on the pending_mid_q */
+ spin_lock(&GlobalMid_Lock);
+ list_add_tail(&mid->qhead, &server->pending_mid_q);
+ spin_unlock(&GlobalMid_Lock);
+
+ rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+ if (rc) {
+ mutex_unlock(&server->srv_mutex);
+ goto out_err;
+ }
+
+ mid->callback = callback;
+ mid->callback_data = cbdata;
+ mid->midState = MID_REQUEST_SUBMITTED;
+#ifdef CONFIG_CIFS_STATS2
+ atomic_inc(&server->inSend);
+#endif
+ rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+#ifdef CONFIG_CIFS_STATS2
+ atomic_dec(&server->inSend);
+ mid->when_sent = jiffies;
+#endif
+ mutex_unlock(&server->srv_mutex);
+ if (rc)
+ goto out_err;
+
+ return rc;
+out_err:
+ delete_mid(mid);
+ atomic_dec(&server->inFlight);
+ wake_up(&server->request_q);
+ return rc;
+}
/*
*
@@ -382,6 +426,81 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
return rc;
}
+static int
+sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+{
+ int rc = 0;
+
+ cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
+ mid->mid, mid->midState);
+
+ spin_lock(&GlobalMid_Lock);
+ /* ensure that it's no longer on the pending_mid_q */
+ list_del_init(&mid->qhead);
+
+ switch (mid->midState) {
+ case MID_RESPONSE_RECEIVED:
+ spin_unlock(&GlobalMid_Lock);
+ return rc;
+ case MID_REQUEST_SUBMITTED:
+ /* socket is going down, reject all calls */
+ if (server->tcpStatus == CifsExiting) {
+ cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
+ __func__, mid->mid, mid->command, mid->midState);
+ rc = -EHOSTDOWN;
+ break;
+ }
+ case MID_RETRY_NEEDED:
+ rc = -EAGAIN;
+ break;
+ default:
+ cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
+ mid->mid, mid->midState);
+ rc = -EIO;
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ DeleteMidQEntry(mid);
+ return rc;
+}
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+ struct mid_q_entry *mid)
+{
+ int rc = 0;
+
+ /* -4 for RFC1001 length and +2 for BCC field */
+ in_buf->smb_buf_length = sizeof(struct smb_hdr) - 4 + 2;
+ in_buf->Command = SMB_COM_NT_CANCEL;
+ in_buf->WordCount = 0;
+ put_bcc_le(0, in_buf);
+
+ mutex_lock(&server->srv_mutex);
+ rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+ if (rc) {
+ mutex_unlock(&server->srv_mutex);
+ return rc;
+ }
+ rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+ mutex_unlock(&server->srv_mutex);
+
+ cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+ in_buf->Mid, rc);
+
+ return rc;
+}
+
int
SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -390,7 +509,6 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
int rc = 0;
int long_op;
unsigned int receive_len;
- unsigned long timeout;
struct mid_q_entry *midQ;
struct smb_hdr *in_buf = iov[0].iov_base;
@@ -413,7 +531,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
to the same server. We may make this configurable later or
use ses->maxReq */
- rc = wait_for_free_request(ses, long_op);
+ rc = wait_for_free_request(ses->server, long_op);
if (rc) {
cifs_small_buf_release(in_buf);
return rc;
@@ -457,65 +575,20 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if (rc < 0)
goto out;
- if (long_op == CIFS_STD_OP)
- timeout = 15 * HZ;
- else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */
- timeout = 180 * HZ;
- else if (long_op == CIFS_LONG_OP)
- timeout = 45 * HZ; /* should be greater than
- servers oplock break timeout (about 43 seconds) */
- else if (long_op == CIFS_ASYNC_OP)
+ if (long_op == CIFS_ASYNC_OP)
goto out;
- else if (long_op == CIFS_BLOCKING_OP)
- timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
- else {
- cERROR(1, "unknown timeout flag %d", long_op);
- rc = -EIO;
- goto out;
- }
-
- /* wait for 15 seconds or until woken up due to response arriving or
- due to last connection to this server being unmounted */
- if (signal_pending(current)) {
- /* if signal pending do not hold up user for full smb timeout
- but we still give response a chance to complete */
- timeout = 2 * HZ;
- }
-
- /* No user interrupts in wait - wreaks havoc with performance */
- wait_for_response(ses, midQ, timeout, 10 * HZ);
-
- spin_lock(&GlobalMid_Lock);
- if (midQ->resp_buf == NULL) {
- cERROR(1, "No response to cmd %d mid %d",
- midQ->command, midQ->mid);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
- if (ses->server->tcpStatus == CifsExiting)
- rc = -EHOSTDOWN;
- else {
- ses->server->tcpStatus = CifsNeedReconnect;
- midQ->midState = MID_RETRY_NEEDED;
- }
- }
+ rc = wait_for_response(ses->server, midQ);
+ if (rc != 0)
+ goto out;
- if (rc != -EHOSTDOWN) {
- if (midQ->midState == MID_RETRY_NEEDED) {
- rc = -EAGAIN;
- cFYI(1, "marking request for retry");
- } else {
- rc = -EIO;
- }
- }
- spin_unlock(&GlobalMid_Lock);
- DeleteMidQEntry(midQ);
- /* Update # of requests on wire to server */
+ rc = sync_mid_result(midQ, ses->server);
+ if (rc != 0) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
- spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -559,19 +632,18 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if (receive_len >= sizeof(struct smb_hdr) - 4
/* do not count RFC1001 header */ +
(2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
- BCC(midQ->resp_buf) =
- le16_to_cpu(BCC_LE(midQ->resp_buf));
+ put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf);
if ((flags & CIFS_NO_RESP) == 0)
midQ->resp_buf = NULL; /* mark it so buf will
not be freed by
- DeleteMidQEntry */
+ delete_mid */
} else {
rc = -EIO;
cFYI(1, "Bad MID state?");
}
out:
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
@@ -585,7 +657,6 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
{
int rc = 0;
unsigned int receive_len;
- unsigned long timeout;
struct mid_q_entry *midQ;
if (ses == NULL) {
@@ -610,7 +681,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
return -EIO;
}
- rc = wait_for_free_request(ses, long_op);
+ rc = wait_for_free_request(ses->server, long_op);
if (rc)
return rc;
@@ -649,64 +720,20 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (rc < 0)
goto out;
- if (long_op == CIFS_STD_OP)
- timeout = 15 * HZ;
- /* wait for 15 seconds or until woken up due to response arriving or
- due to last connection to this server being unmounted */
- else if (long_op == CIFS_ASYNC_OP)
+ if (long_op == CIFS_ASYNC_OP)
goto out;
- else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */
- timeout = 180 * HZ;
- else if (long_op == CIFS_LONG_OP)
- timeout = 45 * HZ; /* should be greater than
- servers oplock break timeout (about 43 seconds) */
- else if (long_op == CIFS_BLOCKING_OP)
- timeout = 0x7FFFFFFF; /* large but no so large as to wrap */
- else {
- cERROR(1, "unknown timeout flag %d", long_op);
- rc = -EIO;
- goto out;
- }
- if (signal_pending(current)) {
- /* if signal pending do not hold up user for full smb timeout
- but we still give response a chance to complete */
- timeout = 2 * HZ;
- }
-
- /* No user interrupts in wait - wreaks havoc with performance */
- wait_for_response(ses, midQ, timeout, 10 * HZ);
-
- spin_lock(&GlobalMid_Lock);
- if (midQ->resp_buf == NULL) {
- cERROR(1, "No response for cmd %d mid %d",
- midQ->command, midQ->mid);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
- if (ses->server->tcpStatus == CifsExiting)
- rc = -EHOSTDOWN;
- else {
- ses->server->tcpStatus = CifsNeedReconnect;
- midQ->midState = MID_RETRY_NEEDED;
- }
- }
+ rc = wait_for_response(ses->server, midQ);
+ if (rc != 0)
+ goto out;
- if (rc != -EHOSTDOWN) {
- if (midQ->midState == MID_RETRY_NEEDED) {
- rc = -EAGAIN;
- cFYI(1, "marking request for retry");
- } else {
- rc = -EIO;
- }
- }
- spin_unlock(&GlobalMid_Lock);
- DeleteMidQEntry(midQ);
- /* Update # of requests on wire to server */
+ rc = sync_mid_result(midQ, ses->server);
+ if (rc != 0) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
- spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -748,43 +775,20 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (receive_len >= sizeof(struct smb_hdr) - 4
/* do not count RFC1001 header */ +
(2 * out_buf->WordCount) + 2 /* bcc */ )
- BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
+ put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf);
} else {
rc = -EIO;
cERROR(1, "Bad MID state?");
}
out:
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
-/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
-
-static int
-send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
- struct mid_q_entry *midQ)
-{
- int rc = 0;
- struct cifsSesInfo *ses = tcon->ses;
- __u16 mid = in_buf->Mid;
-
- header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
- in_buf->Mid = mid;
- mutex_lock(&ses->server->srv_mutex);
- rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
- if (rc) {
- mutex_unlock(&ses->server->srv_mutex);
- return rc;
- }
- rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
- mutex_unlock(&ses->server->srv_mutex);
- return rc;
-}
-
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
blocking lock to return. */
@@ -807,7 +811,7 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
pSMB->hdr.Mid = GetNextMid(ses->server);
return SendReceive(xid, ses, in_buf, out_buf,
- &bytes_returned, CIFS_STD_OP);
+ &bytes_returned, 0);
}
int
@@ -845,7 +849,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
return -EIO;
}
- rc = wait_for_free_request(ses, CIFS_BLOCKING_OP);
+ rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
if (rc)
return rc;
@@ -863,7 +867,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
if (rc) {
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
@@ -880,7 +884,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0) {
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
return rc;
}
@@ -899,10 +903,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
blocking lock to return. */
-
- rc = send_nt_cancel(tcon, in_buf, midQ);
+ rc = send_nt_cancel(ses->server, in_buf, midQ);
if (rc) {
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
return rc;
}
} else {
@@ -914,47 +917,22 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
/* If we get -ENOLCK back the lock may have
already been removed. Don't exit in this case. */
if (rc && rc != -ENOLCK) {
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
return rc;
}
}
- /* Wait 5 seconds for the response. */
- if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) {
+ if (wait_for_response(ses->server, midQ) == 0) {
/* We got the response - restart system call. */
rstart = 1;
}
}
- spin_lock(&GlobalMid_Lock);
- if (midQ->resp_buf) {
- spin_unlock(&GlobalMid_Lock);
- receive_len = midQ->resp_buf->smb_buf_length;
- } else {
- cERROR(1, "No response for cmd %d mid %d",
- midQ->command, midQ->mid);
- if (midQ->midState == MID_REQUEST_SUBMITTED) {
- if (ses->server->tcpStatus == CifsExiting)
- rc = -EHOSTDOWN;
- else {
- ses->server->tcpStatus = CifsNeedReconnect;
- midQ->midState = MID_RETRY_NEEDED;
- }
- }
-
- if (rc != -EHOSTDOWN) {
- if (midQ->midState == MID_RETRY_NEEDED) {
- rc = -EAGAIN;
- cFYI(1, "marking request for retry");
- } else {
- rc = -EIO;
- }
- }
- spin_unlock(&GlobalMid_Lock);
- DeleteMidQEntry(midQ);
+ rc = sync_mid_result(midQ, ses->server);
+ if (rc != 0)
return rc;
- }
+ receive_len = midQ->resp_buf->smb_buf_length;
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
cERROR(1, "Frame too large received. Length: %d Xid: %d",
receive_len, xid);
@@ -998,10 +976,10 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
if (receive_len >= sizeof(struct smb_hdr) - 4
/* do not count RFC1001 header */ +
(2 * out_buf->WordCount) + 2 /* bcc */ )
- BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
+ put_bcc(get_bcc_le(out_buf), out_buf);
out:
- DeleteMidQEntry(midQ);
+ delete_mid(midQ);
if (rstart && rc == -EACCES)
return -ERESTARTSYS;
return rc;
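
The transport rework replaces the per-request task pointer (mid->tsk) with a callback/callback_data pair: synchronous callers keep the old behaviour through the default wake_up_task() callback, while cifs_call_async() lets a caller queue a request and have the connection's demultiplex thread run an arbitrary completion handler; the fixed 15/45/180-second waits give way to wait_event_killable(). A userspace sketch of the same request-with-callback pattern, using a condition variable where the kernel uses wake_up_process(); struct mid_sketch, responder() and the other names are invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct mid_sketch {
	void (*callback)(struct mid_sketch *mid);
	void *callback_data;
	int done;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

/* Default callback: wake the submitting thread (kernel: wake_up_task()). */
static void wake_up_submitter(struct mid_sketch *mid)
{
	pthread_mutex_lock(&mid->lock);
	mid->done = 1;
	pthread_cond_signal(&mid->cond);
	pthread_mutex_unlock(&mid->lock);
}

/* Async callback: handle the response without blocking the submitter. */
static void log_completion(struct mid_sketch *mid)
{
	printf("async request done, cbdata=%s\n", (char *)mid->callback_data);
}

/* Stand-in for the thread that matches a response to its mid entry. */
static void *responder(void *arg)
{
	struct mid_sketch *mid = arg;

	sleep(1);		/* pretend the server took a while */
	mid->callback(mid);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	struct mid_sketch sync_mid = { wake_up_submitter, NULL, 0,
				       PTHREAD_MUTEX_INITIALIZER,
				       PTHREAD_COND_INITIALIZER };
	struct mid_sketch async_mid = { log_completion, "oplock break", 0,
					PTHREAD_MUTEX_INITIALIZER,
					PTHREAD_COND_INITIALIZER };

	pthread_create(&t1, NULL, responder, &sync_mid);
	pthread_create(&t2, NULL, responder, &async_mid);

	/* Synchronous caller: block until the default callback fires. */
	pthread_mutex_lock(&sync_mid.lock);
	while (!sync_mid.done)
		pthread_cond_wait(&sync_mid.cond, &sync_mid.lock);
	pthread_mutex_unlock(&sync_mid.lock);
	printf("synchronous request done\n");

	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Build with -pthread; in the kernel the role of responder() is played by the thread that matches incoming responses to their mid entries.
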
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 85882f6..b044705 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -325,12 +325,16 @@ void dio_end_io(struct bio *bio, int error)
}
EXPORT_SYMBOL_GPL(dio_end_io);
-static int
+static void
dio_bio_alloc(struct dio *dio, struct block_device *bdev,
sector_t first_sector, int nr_vecs)
{
struct bio *bio;
+ /*
+ * bio_alloc() is guaranteed to return a bio when called with
+ * __GFP_WAIT and we request a valid number of vectors.
+ */
bio = bio_alloc(GFP_KERNEL, nr_vecs);
bio->bi_bdev = bdev;
@@ -342,7 +346,6 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
dio->bio = bio;
dio->logical_offset_in_bio = dio->cur_page_fs_offset;
- return 0;
}
/*
@@ -583,8 +586,9 @@ static int dio_new_bio(struct dio *dio, sector_t start_sector)
goto out;
sector = start_sector << (dio->blkbits - 9);
nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
+ nr_pages = min(nr_pages, BIO_MAX_PAGES);
BUG_ON(nr_pages <= 0);
- ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
+ dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
dio->boundary = 0;
out:
return ret;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 7aa767d..85c8cc8 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -754,7 +754,7 @@ static int ext3_release_dquot(struct dquot *dquot);
static int ext3_mark_dquot_dirty(struct dquot *dquot);
static int ext3_write_info(struct super_block *sb, int type);
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
- char *path);
+ struct path *path);
static int ext3_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
@@ -2877,27 +2877,20 @@ static int ext3_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
- char *name)
+ struct path *path)
{
int err;
- struct path path;
if (!test_opt(sb, QUOTA))
return -EINVAL;
- err = kern_path(name, LOOKUP_FOLLOW, &path);
- if (err)
- return err;
-
/* Quotafile not on the same filesystem? */
- if (path.mnt->mnt_sb != sb) {
- path_put(&path);
+ if (path->mnt->mnt_sb != sb)
return -EXDEV;
- }
/* Journaling quota? */
if (EXT3_SB(sb)->s_qf_names[type]) {
/* Quotafile not of fs root? */
- if (path.dentry->d_parent != sb->s_root)
+ if (path->dentry->d_parent != sb->s_root)
ext3_msg(sb, KERN_WARNING,
"warning: Quota file not on filesystem root. "
"Journaled quota will not work.");
@@ -2907,7 +2900,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
* When we journal data on quota file, we have to flush journal to see
* all updates to the file when we bypass pagecache...
*/
- if (ext3_should_journal_data(path.dentry->d_inode)) {
+ if (ext3_should_journal_data(path->dentry->d_inode)) {
/*
* We don't need to lock updates but journal_flush() could
* otherwise be livelocked...
@@ -2915,15 +2908,11 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
journal_lock_updates(EXT3_SB(sb)->s_journal);
err = journal_flush(EXT3_SB(sb)->s_journal);
journal_unlock_updates(EXT3_SB(sb)->s_journal);
- if (err) {
- path_put(&path);
+ if (err)
return err;
- }
}
- err = dquot_quota_on_path(sb, type, format_id, &path);
- path_put(&path);
- return err;
+ return dquot_quota_on(sb, type, format_id, path);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cb10a06..48ce561 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1161,7 +1161,7 @@ static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- char *path);
+ struct path *path);
static int ext4_quota_off(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
@@ -4558,27 +4558,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- char *name)
+ struct path *path)
{
int err;
- struct path path;
if (!test_opt(sb, QUOTA))
return -EINVAL;
- err = kern_path(name, LOOKUP_FOLLOW, &path);
- if (err)
- return err;
-
/* Quotafile not on the same filesystem? */
- if (path.mnt->mnt_sb != sb) {
- path_put(&path);
+ if (path->mnt->mnt_sb != sb)
return -EXDEV;
- }
/* Journaling quota? */
if (EXT4_SB(sb)->s_qf_names[type]) {
/* Quotafile not in fs root? */
- if (path.dentry->d_parent != sb->s_root)
+ if (path->dentry->d_parent != sb->s_root)
ext4_msg(sb, KERN_WARNING,
"Quota file not on filesystem root. "
"Journaled quota will not work");
@@ -4589,7 +4582,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
* all updates to the file when we bypass pagecache...
*/
if (EXT4_SB(sb)->s_journal &&
- ext4_should_journal_data(path.dentry->d_inode)) {
+ ext4_should_journal_data(path->dentry->d_inode)) {
/*
* We don't need to lock updates but journal_flush() could
* otherwise be livelocked...
@@ -4597,15 +4590,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
- if (err) {
- path_put(&path);
+ if (err)
return err;
- }
}
- err = dquot_quota_on_path(sb, type, format_id, &path);
- path_put(&path);
- return err;
+ return dquot_quota_on(sb, type, format_id, path);
}
static int ext4_quota_off(struct super_block *sb, int type)
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 2232b3c..7aa7d4f 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -74,16 +74,14 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
}
/**
- * GFS2 lookup code fills in vfs inode contents based on info obtained
- * from directory entry inside gfs2_inode_lookup(). This has caused issues
- * with NFS code path since its get_dentry routine doesn't have the relevant
- * directory entry when gfs2_inode_lookup() is invoked. Part of the code
- * segment inside gfs2_inode_lookup code needs to get moved around.
+ * gfs2_set_iop - Sets inode operations
+ * @inode: The inode with correct i_mode filled in
*
- * Clears I_NEW as well.
- **/
+ * GFS2 lookup code fills in vfs inode contents based on info obtained
+ * from directory entry inside gfs2_inode_lookup().
+ */
-void gfs2_set_iop(struct inode *inode)
+static void gfs2_set_iop(struct inode *inode)
{
struct gfs2_sbd *sdp = GFS2_SB(inode);
umode_t mode = inode->i_mode;
@@ -106,8 +104,6 @@ void gfs2_set_iop(struct inode *inode)
inode->i_op = &gfs2_file_iops;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
}
-
- unlock_new_inode(inode);
}
/**
@@ -119,10 +115,8 @@ void gfs2_set_iop(struct inode *inode)
* Returns: A VFS inode, or an error
*/
-struct inode *gfs2_inode_lookup(struct super_block *sb,
- unsigned int type,
- u64 no_addr,
- u64 no_formal_ino)
+struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+ u64 no_addr, u64 no_formal_ino)
{
struct inode *inode;
struct gfs2_inode *ip;
@@ -152,51 +146,37 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
if (unlikely(error))
goto fail_iopen;
- ip->i_iopen_gh.gh_gl->gl_object = ip;
+ ip->i_iopen_gh.gh_gl->gl_object = ip;
gfs2_glock_put(io_gl);
io_gl = NULL;
- if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
- goto gfs2_nfsbypass;
-
- inode->i_mode = DT2IF(type);
-
- /*
- * We must read the inode in order to work out its type in
- * this case. Note that this doesn't happen often as we normally
- * know the type beforehand. This code path only occurs during
- * unlinked inode recovery (where it is safe to do this glock,
- * which is not true in the general case).
- */
if (type == DT_UNKNOWN) {
- struct gfs2_holder gh;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
- if (unlikely(error))
- goto fail_glock;
- /* Inode is now uptodate */
- gfs2_glock_dq_uninit(&gh);
+ /* Inode glock must be locked already */
+ error = gfs2_inode_refresh(GFS2_I(inode));
+ if (error)
+ goto fail_refresh;
+ } else {
+ inode->i_mode = DT2IF(type);
}
gfs2_set_iop(inode);
+ unlock_new_inode(inode);
}
-gfs2_nfsbypass:
return inode;
-fail_glock:
- gfs2_glock_dq(&ip->i_iopen_gh);
+
+fail_refresh:
+ ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_iopen:
if (io_gl)
gfs2_glock_put(io_gl);
fail_put:
- if (inode->i_state & I_NEW)
- ip->i_gl->gl_object = NULL;
+ ip->i_gl->gl_object = NULL;
gfs2_glock_put(ip->i_gl);
fail:
- if (inode->i_state & I_NEW)
- iget_failed(inode);
- else
- iput(inode);
+ iget_failed(inode);
return ERR_PTR(error);
}
@@ -221,14 +201,6 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
if (IS_ERR(inode))
goto fail;
- error = gfs2_inode_refresh(GFS2_I(inode));
- if (error)
- goto fail_iput;
-
- /* Pick up the works we bypass in gfs2_inode_lookup */
- if (inode->i_state & I_NEW)
- gfs2_set_iop(inode);
-
/* Two extra checks for NFS only */
if (no_formal_ino) {
error = -ESTALE;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 732a183..3e00a66 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -96,7 +96,6 @@ err:
return -EIO;
}
-extern void gfs2_set_iop(struct inode *inode);
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
u64 no_addr, u64 no_formal_ino);
extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 16c2eca..ec73ed7 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1336,6 +1336,7 @@ static void gfs2_evict_inode(struct inode *inode)
if (error)
goto out_truncate;
+ ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
error = gfs2_glock_nq(&ip->i_iopen_gh);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 06d1f74..38f986d 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -993,8 +993,7 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
}
/* Handle quota on quotactl */
-static int ocfs2_quota_on(struct super_block *sb, int type, int format_id,
- char *path)
+static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
{
unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
@@ -1013,7 +1012,7 @@ static int ocfs2_quota_off(struct super_block *sb, int type)
}
static const struct quotactl_ops ocfs2_quotactl_ops = {
- .quota_on = ocfs2_quota_on,
+ .quota_on_meta = ocfs2_quota_on,
.quota_off = ocfs2_quota_off,
.quota_sync = dquot_quota_sync,
.get_info = dquot_get_dqinfo,
diff --git a/fs/pipe.c b/fs/pipe.c
index 89e9e19..da42f7d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -441,7 +441,7 @@ redo:
break;
}
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
pipe_wait(pipe);
@@ -450,7 +450,7 @@ redo:
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
@@ -612,7 +612,7 @@ redo2:
break;
}
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
@@ -623,7 +623,7 @@ redo2:
out:
mutex_unlock(&inode->i_mutex);
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0)
@@ -715,7 +715,7 @@ pipe_release(struct inode *inode, int decr, int decw)
if (!pipe->readers && !pipe->writers) {
free_pipe_info(inode);
} else {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT);
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
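
The pipe wakeups now pass POLLRDNORM/POLLWRNORM (and POLLERR | POLLHUP on release) in the wakeup key, so keyed waiters that subscribed to those bits rather than plain POLLIN/POLLOUT are no longer missed. From userspace the *NORM bits are simply part of the event mask a poller may ask for; a small sketch, assuming a libc that exposes POLLRDNORM under _XOPEN_SOURCE:

#define _XOPEN_SOURCE 600
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	struct pollfd pfd;

	if (pipe(fds))
		return 1;
	if (write(fds[1], "x", 1) != 1)
		return 1;

	/* Ask specifically for POLLRDNORM, one of the bits the wakeup key
	 * now carries for pipe readers. */
	pfd.fd = fds[0];
	pfd.events = POLLRDNORM;
	if (poll(&pfd, 1, 1000) == 1)
		printf("revents = 0x%x\n", (unsigned)pfd.revents);

	close(fds[0]);
	close(fds[1]);
	return 0;
}
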
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 6a00688..15af622 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -1,5 +1,5 @@
config PROC_FS
- bool "/proc file system support" if EMBEDDED
+ bool "/proc file system support" if EXPERT
default y
help
This is a virtual file system providing information about the status
@@ -40,7 +40,7 @@ config PROC_VMCORE
Exports the dump image of crashed kernel in ELF format.
config PROC_SYSCTL
- bool "Sysctl support (/proc/sys)" if EMBEDDED
+ bool "Sysctl support (/proc/sys)" if EXPERT
depends on PROC_FS
select SYSCTL
default y
@@ -61,7 +61,7 @@ config PROC_SYSCTL
config PROC_PAGE_MONITOR
default y
depends on PROC_FS && MMU
- bool "Enable /proc page monitoring" if EMBEDDED
+ bool "Enable /proc page monitoring" if EXPERT
help
Various /proc files exist to monitor process memory utilization:
/proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 84becd3..a2a622e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2189,8 +2189,8 @@ int dquot_resume(struct super_block *sb, int type)
}
EXPORT_SYMBOL(dquot_resume);
-int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
- struct path *path)
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
+ struct path *path)
{
int error = security_quota_on(path->dentry);
if (error)
@@ -2204,20 +2204,6 @@ int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
DQUOT_LIMITS_ENABLED);
return error;
}
-EXPORT_SYMBOL(dquot_quota_on_path);
-
-int dquot_quota_on(struct super_block *sb, int type, int format_id, char *name)
-{
- struct path path;
- int error;
-
- error = kern_path(name, LOOKUP_FOLLOW, &path);
- if (!error) {
- error = dquot_quota_on_path(sb, type, format_id, &path);
- path_put(&path);
- }
- return error;
-}
EXPORT_SYMBOL(dquot_quota_on);
/*
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index b299961..b34bdb2 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -64,18 +64,15 @@ static int quota_sync_all(int type)
}
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
- void __user *addr)
+ struct path *path)
{
- char *pathname;
- int ret = -ENOSYS;
-
- pathname = getname(addr);
- if (IS_ERR(pathname))
- return PTR_ERR(pathname);
- if (sb->s_qcop->quota_on)
- ret = sb->s_qcop->quota_on(sb, type, id, pathname);
- putname(pathname);
- return ret;
+ if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+ return -ENOSYS;
+ if (sb->s_qcop->quota_on_meta)
+ return sb->s_qcop->quota_on_meta(sb, type, id);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ return sb->s_qcop->quota_on(sb, type, id, path);
}
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
@@ -241,7 +238,7 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
- void __user *addr)
+ void __user *addr, struct path *path)
{
int ret;
@@ -256,7 +253,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
switch (cmd) {
case Q_QUOTAON:
- return quota_quotaon(sb, type, cmd, id, addr);
+ return quota_quotaon(sb, type, cmd, id, path);
case Q_QUOTAOFF:
if (!sb->s_qcop->quota_off)
return -ENOSYS;
@@ -335,6 +332,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
{
uint cmds, type;
struct super_block *sb = NULL;
+ struct path path, *pathp = NULL;
int ret;
cmds = cmd >> SUBCMDSHIFT;
@@ -351,12 +349,27 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
return -ENODEV;
}
+ /*
+ * Path for quotaon has to be resolved before grabbing superblock
+ * because that gets s_umount sem which is also possibly needed by path
+ * resolution (think about autofs) and thus deadlocks could arise.
+ */
+ if (cmds == Q_QUOTAON) {
+ ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+ if (ret)
+ pathp = ERR_PTR(ret);
+ else
+ pathp = &path;
+ }
+
sb = quotactl_block(special);
if (IS_ERR(sb))
return PTR_ERR(sb);
- ret = do_quotactl(sb, type, cmds, id, addr);
+ ret = do_quotactl(sb, type, cmds, id, addr, pathp);
drop_super(sb);
+ if (pathp && !IS_ERR(pathp))
+ path_put(pathp);
return ret;
}
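
The quotactl path for Q_QUOTAON now resolves the quota-file name with user_path_at() before quotactl_block() takes s_umount, avoiding the deadlock described in the comment above, and hands the filesystem a ready-made struct path (or calls ->quota_on_meta() where no path is needed). The userspace interface is unchanged; enabling user quotas still passes the quota file path as the addr argument, roughly as in this sketch (device and file names are hypothetical; CAP_SYS_ADMIN, a prepared quota file, and modern <sys/quota.h> command definitions are assumed):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>

#ifndef QFMT_VFS_V0
#define QFMT_VFS_V0 2	/* kernel ABI value, in case the headers predate it */
#endif

int main(void)
{
	/* Hypothetical block device and quota file prepared with quotacheck. */
	const char *special = "/dev/sda1";
	const char *qfile = "/mnt/data/aquota.user";

	if (quotactl(QCMD(Q_QUOTAON, USRQUOTA), special, QFMT_VFS_V0,
		     (caddr_t)qfile) != 0) {
		fprintf(stderr, "quotactl: %s\n", strerror(errno));
		return 1;
	}
	printf("user quotas enabled on %s\n", special);
	return 0;
}
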
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 2575682..0aab04f 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -632,7 +632,7 @@ static int reiserfs_acquire_dquot(struct dquot *);
static int reiserfs_release_dquot(struct dquot *);
static int reiserfs_mark_dquot_dirty(struct dquot *);
static int reiserfs_write_info(struct super_block *, int);
-static int reiserfs_quota_on(struct super_block *, int, int, char *);
+static int reiserfs_quota_on(struct super_block *, int, int, struct path *);
static const struct dquot_operations reiserfs_quota_operations = {
.write_dquot = reiserfs_write_dquot,
@@ -2048,25 +2048,21 @@ static int reiserfs_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
- char *name)
+ struct path *path)
{
int err;
- struct path path;
struct inode *inode;
struct reiserfs_transaction_handle th;
if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
return -EINVAL;
- err = kern_path(name, LOOKUP_FOLLOW, &path);
- if (err)
- return err;
/* Quotafile not on the same filesystem? */
- if (path.mnt->mnt_sb != sb) {
+ if (path->mnt->mnt_sb != sb) {
err = -EXDEV;
goto out;
}
- inode = path.dentry->d_inode;
+ inode = path->dentry->d_inode;
/* We must not pack tails for quota files on reiserfs for quota IO to work */
if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
err = reiserfs_unpack(inode, NULL);
@@ -2082,7 +2078,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
/* Journaling quota? */
if (REISERFS_SB(sb)->s_qf_names[type]) {
/* Quotafile not of fs root? */
- if (path.dentry->d_parent != sb->s_root)
+ if (path->dentry->d_parent != sb->s_root)
reiserfs_warning(sb, "super-6521",
"Quota file not on filesystem root. "
"Journalled quota will not work.");
@@ -2101,9 +2097,8 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
if (err)
goto out;
}
- err = dquot_quota_on_path(sb, type, format_id, &path);
+ err = dquot_quota_on(sb, type, format_id, path);
out:
- path_put(&path);
return err;
}
diff --git a/fs/sysfs/Kconfig b/fs/sysfs/Kconfig
index f4b6758..8c41fea 100644
--- a/fs/sysfs/Kconfig
+++ b/fs/sysfs/Kconfig
@@ -1,5 +1,5 @@
config SYSFS
- bool "sysfs file system support" if EMBEDDED
+ bool "sysfs file system support" if EXPERT
default y
help
The sysfs filesystem is a virtual filesystem that the kernel uses to