cifs: use ALIGN() and round_up() macros

Improve code readability by using existing macros:

Replace hardcoded alignment computations (e.g. (len + 7) & ~0x7) with the
ALIGN()/IS_ALIGNED() macros.

Also replace (DIV_ROUND_UP(len, 8) * 8) with ALIGN(len, 8); the former, if
not optimized by the compiler, carries the overhead of a multiplication
and a division. Do the same for roundup(), replacing it with round_up()
(the division-less version, which requires the multiple to be a power of 2,
which is always the case for us).
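
For reference, a rough sketch of what the open-coded form and these helpers
boil down to for an 8-byte alignment (paraphrased from include/linux/align.h
and include/linux/math.h; the real macros add typeof handling on top of this):

	(len + 7) & ~0x7          /* open-coded form being removed            */
	DIV_ROUND_UP(len, 8) * 8  /* ((len + 7) / 8) * 8: divide then multiply */
	roundup(len, 8)           /* ((len + 8 - 1) / 8) * 8: uses a division  */
	round_up(len, 8)          /* ((len - 1) | 7) + 1: power-of-2 multiples */
	ALIGN(len, 8)             /* (len + 7) & ~7: power-of-2 multiples      */
	IS_ALIGNED(len, 8)        /* (len & 7) == 0                            */

For len = 9 all of the rounding forms yield 16; for len = 16 they yield 16
unchanged.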

Also remove some unnecessary checks where !IS_ALIGNED() would fit, since
calling round_up() directly is fine: it is a no-op if the value is
already aligned.
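
To make that last point concrete, a sketch of the pattern being dropped,
modelled on the SMB2_open_init hunk below:

	/* before: rounding guarded by an explicit alignment check */
	copy_size = uni_path_len;
	if (copy_size % 8 != 0)
		copy_size = roundup(copy_size, 8);

	/* after: round_up() returns an already-aligned value unchanged */
	copy_size = round_up(uni_path_len, 8);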

Signed-off-by: Enzo Matsumiya <ematsumiya@suse.de>
Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Author:    Enzo Matsumiya <ematsumiya@suse.de>, 2022-10-12 22:53:09 -05:00
Committer: Steve French <stfrench@microsoft.com>
commit d7173623bf (parent e4029e0726)
5 changed files with 33 additions and 38 deletions

fs/cifs/cifssmb.c

@@ -2305,7 +2305,7 @@ int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *pTcon,
 					remap);
 	}
 	rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
-	count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str);
+	count = sizeof(struct set_file_rename) + (2 * len_of_str);
 	byte_count += count;
 	pSMB->DataCount = cpu_to_le16(count);
 	pSMB->TotalDataCount = pSMB->DataCount;

fs/cifs/connect.c

@@ -2832,9 +2832,12 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
 	 * sessinit is sent but no second negprot
 	 */
 	struct rfc1002_session_packet *ses_init_buf;
+	unsigned int req_noscope_len;
 	struct smb_hdr *smb_buf;
 	ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
 			       GFP_KERNEL);
 	if (ses_init_buf) {
 		ses_init_buf->trailer.session_req.called_len = 32;
@@ -2870,8 +2873,12 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
 		ses_init_buf->trailer.session_req.scope2 = 0;
 		smb_buf = (struct smb_hdr *)ses_init_buf;
-		/* sizeof RFC1002_SESSION_REQUEST with no scope */
-		smb_buf->smb_buf_length = cpu_to_be32(0x81000044);
+		/* sizeof RFC1002_SESSION_REQUEST with no scopes */
+		req_noscope_len = sizeof(struct rfc1002_session_packet) - 2;
+		/* == cpu_to_be32(0x81000044) */
+		smb_buf->smb_buf_length =
+			cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | req_noscope_len);
 		rc = smb_send(server, smb_buf, 0x44);
 		kfree(ses_init_buf);
 		/*

fs/cifs/sess.c

@@ -601,11 +601,6 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
 	/* BB FIXME add check that strings total less
 	than 335 or will need to send them as arrays */
 	/* unicode strings, must be word aligned before the call */
-	/* if ((long) bcc_ptr % 2) {
-		*bcc_ptr = 0;
-		bcc_ptr++;
-	} */
 	/* copy user */
 	if (ses->user_name == NULL) {
 		/* null user mount */
@@ -1324,7 +1319,7 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
 	}
 	if (ses->capabilities & CAP_UNICODE) {
-		if (sess_data->iov[0].iov_len % 2) {
+		if (!IS_ALIGNED(sess_data->iov[0].iov_len, 2)) {
 			*bcc_ptr = 0;
 			bcc_ptr++;
 		}
@@ -1364,7 +1359,7 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
 		/* no string area to decode, do nothing */
 	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
 		/* unicode string area must be word-aligned */
-		if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
 			++bcc_ptr;
 			--bytes_remaining;
 		}
@@ -1448,8 +1443,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
 	if (ses->capabilities & CAP_UNICODE) {
 		/* unicode strings must be word aligned */
-		if ((sess_data->iov[0].iov_len
-		    + sess_data->iov[1].iov_len) % 2) {
+		if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
 			*bcc_ptr = 0;
 			bcc_ptr++;
 		}
@@ -1500,7 +1494,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
 		/* no string area to decode, do nothing */
 	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
 		/* unicode string area must be word-aligned */
-		if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
 			++bcc_ptr;
 			--bytes_remaining;
 		}
@@ -1552,7 +1546,7 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
 	bcc_ptr = sess_data->iov[2].iov_base;
 	/* unicode strings must be word aligned */
-	if ((sess_data->iov[0].iov_len + sess_data->iov[1].iov_len) % 2) {
+	if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
 		*bcc_ptr = 0;
 		bcc_ptr++;
 	}
@@ -1753,7 +1747,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
 		/* no string area to decode, do nothing */
 	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
 		/* unicode string area must be word-aligned */
-		if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
 			++bcc_ptr;
 			--bytes_remaining;
 		}

fs/cifs/smb2misc.c

@@ -248,7 +248,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
 		 * Some windows servers (win2016) will pad also the final
 		 * PDU in a compound to 8 bytes.
 		 */
-		if (((calc_len + 7) & ~7) == len)
+		if (ALIGN(calc_len, 8) == len)
 			return 0;
 		/*

fs/cifs/smb2pdu.c

@@ -466,15 +466,14 @@ build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
 	/*
 	 * Context Data length must be rounded to multiple of 8 for some servers
 	 */
-	pneg_ctxt->DataLength = cpu_to_le16(DIV_ROUND_UP(
-				sizeof(struct smb2_signing_capabilities) -
-				sizeof(struct smb2_neg_context) +
-				(num_algs * 2 /* sizeof u16 */), 8) * 8);
+	pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
+					    sizeof(struct smb2_neg_context) +
+					    (num_algs * sizeof(u16)), 8));
 	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
 	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
-	ctxt_len += 2 /* sizeof le16 */ * num_algs;
-	ctxt_len = DIV_ROUND_UP(ctxt_len, 8) * 8;
+	ctxt_len += sizeof(__le16) * num_algs;
+	ctxt_len = ALIGN(ctxt_len, 8);
 	return ctxt_len;
 	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
 }
@@ -511,8 +510,7 @@ build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
 	/* copy up to max of first 100 bytes of server name to NetName field */
 	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
 	/* context size is DataLength + minimal smb2_neg_context */
-	return DIV_ROUND_UP(le16_to_cpu(pneg_ctxt->DataLength) +
-			sizeof(struct smb2_neg_context), 8) * 8;
+	return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
 }
 static void
@@ -557,18 +555,18 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
 	 * round up total_len of fixed part of SMB3 negotiate request to 8
 	 * byte boundary before adding negotiate contexts
 	 */
-	*total_len = roundup(*total_len, 8);
+	*total_len = ALIGN(*total_len, 8);
 	pneg_ctxt = (*total_len) + (char *)req;
 	req->NegotiateContextOffset = cpu_to_le32(*total_len);
 	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
-	ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8;
+	ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
 	*total_len += ctxt_len;
 	pneg_ctxt += ctxt_len;
 	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
-	ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8;
+	ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
 	*total_len += ctxt_len;
 	pneg_ctxt += ctxt_len;
@@ -595,9 +593,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
 	if (server->compress_algorithm) {
 		build_compression_ctxt((struct smb2_compression_capabilities_context *)
 				pneg_ctxt);
-		ctxt_len = DIV_ROUND_UP(
-			sizeof(struct smb2_compression_capabilities_context),
-				8) * 8;
+		ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
 		*total_len += ctxt_len;
 		pneg_ctxt += ctxt_len;
 		neg_context_count++;
@@ -780,7 +776,7 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
 		if (rc)
 			break;
 		/* offsets must be 8 byte aligned */
-		clen = (clen + 7) & ~0x7;
+		clen = ALIGN(clen, 8);
 		offset += clen + sizeof(struct smb2_neg_context);
 		len_of_ctxts -= clen;
 	}
@@ -2426,7 +2422,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
 	unsigned int group_offset = 0;
 	struct smb3_acl acl;
-	*len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
+	*len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
 	if (set_owner) {
 		/* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
@@ -2500,7 +2496,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
 	memcpy(aclptr, &acl, sizeof(struct smb3_acl));
 	buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
-	*len = roundup(ptr - (__u8 *)buf, 8);
+	*len = round_up((unsigned int)(ptr - (__u8 *)buf), 8);
 	return buf;
 }
@@ -2594,7 +2590,7 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
 	 * final path needs to be 8-byte aligned as specified in
 	 * MS-SMB2 2.2.13 SMB2 CREATE Request.
 	 */
-	*out_size = roundup(*out_len * sizeof(__le16), 8);
+	*out_size = round_up(*out_len * sizeof(__le16), 8);
 	*out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
 	if (!*out_path)
 		return -ENOMEM;
@@ -2839,9 +2835,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
 		uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
 		/* MUST set path len (NameLength) to 0 opening root of share */
 		req->NameLength = cpu_to_le16(uni_path_len - 2);
-		copy_size = uni_path_len;
-		if (copy_size % 8 != 0)
-			copy_size = roundup(copy_size, 8);
+		copy_size = round_up(uni_path_len, 8);
 		copy_path = kzalloc(copy_size, GFP_KERNEL);
 		if (!copy_path)
 			return -ENOMEM;
@@ -4103,7 +4097,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
 	if (request_type & CHAINED_REQUEST) {
 		if (!(request_type & END_OF_CHAIN)) {
 			/* next 8-byte aligned request */
-			*total_len = DIV_ROUND_UP(*total_len, 8) * 8;
+			*total_len = ALIGN(*total_len, 8);
 			shdr->NextCommand = cpu_to_le32(*total_len);
 		} else /* END_OF_CHAIN */
 			shdr->NextCommand = 0;