Commit 6a35528a authored by Yang Hongyang, committed by Linus Torvalds

dma-mapping: replace all uses of the DMA_64BIT_MASK macro with DMA_BIT_MASK(64)



Replace all uses of the DMA_64BIT_MASK macro with DMA_BIT_MASK(64).

Signed-off-by: Yang Hongyang <yanghy@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8a59f5d2
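
For context, the substitution is purely mechanical: DMA_BIT_MASK(64) evaluates to the same all-ones 64-bit value as the old DMA_64BIT_MASK constant, so no driver behaviour changes. A minimal sketch of the two definitions, paraphrased from include/linux/dma-mapping.h of that era:

/* Generic form: a mask covering the low n address bits.
 * The n == 64 case is special-cased to avoid the undefined
 * behaviour of shifting a 64-bit value by 64 places. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Legacy constant being phased out; identical to DMA_BIT_MASK(64). */
#define DMA_64BIT_MASK 0xffffffffffffffffULL
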
@@ -307,7 +307,7 @@ static struct resource iop13xx_adma_2_resources[] = {
 }
 };
-static u64 iop13xx_adma_dmamask = DMA_64BIT_MASK;
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
 .hw_id = 0,
 .pool_size = PAGE_SIZE,
@@ -331,7 +331,7 @@ static struct platform_device iop13xx_adma_0_channel = {
 .resource = iop13xx_adma_0_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop13xx_adma_0_data,
 },
 };
@@ -343,7 +343,7 @@ static struct platform_device iop13xx_adma_1_channel = {
 .resource = iop13xx_adma_1_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop13xx_adma_1_data,
 },
 };
@@ -355,7 +355,7 @@ static struct platform_device iop13xx_adma_2_channel = {
 .resource = iop13xx_adma_2_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop13xx_adma_2_data,
 },
 };
......
@@ -151,7 +151,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
 }
 };
-u64 iop13xx_tpmi_mask = DMA_64BIT_MASK;
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
 static struct platform_device iop13xx_tpmi_0_device = {
 .name = "iop-tpmi",
 .id = 0,
@@ -159,7 +159,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
 .resource = iop13xx_tpmi_0_resources,
 .dev = {
 .dma_mask = &iop13xx_tpmi_mask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 },
 };
@@ -170,7 +170,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
 .resource = iop13xx_tpmi_1_resources,
 .dev = {
 .dma_mask = &iop13xx_tpmi_mask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 },
 };
@@ -181,7 +181,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
 .resource = iop13xx_tpmi_2_resources,
 .dev = {
 .dma_mask = &iop13xx_tpmi_mask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 },
 };
@@ -192,7 +192,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
 .resource = iop13xx_tpmi_3_resources,
 .dev = {
 .dma_mask = &iop13xx_tpmi_mask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 },
 };
......
@@ -559,7 +559,7 @@ static struct platform_device kirkwood_xor00_channel = {
 .resource = kirkwood_xor00_resources,
 .dev = {
 .dma_mask = &kirkwood_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&kirkwood_xor00_data,
 },
 };
@@ -585,7 +585,7 @@ static struct platform_device kirkwood_xor01_channel = {
 .resource = kirkwood_xor01_resources,
 .dev = {
 .dma_mask = &kirkwood_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&kirkwood_xor01_data,
 },
 };
@@ -657,7 +657,7 @@ static struct platform_device kirkwood_xor10_channel = {
 .resource = kirkwood_xor10_resources,
 .dev = {
 .dma_mask = &kirkwood_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&kirkwood_xor10_data,
 },
 };
@@ -683,7 +683,7 @@ static struct platform_device kirkwood_xor11_channel = {
 .resource = kirkwood_xor11_resources,
 .dev = {
 .dma_mask = &kirkwood_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&kirkwood_xor11_data,
 },
 };
......
@@ -486,7 +486,7 @@ static struct platform_device orion5x_xor0_channel = {
 .resource = orion5x_xor0_resources,
 .dev = {
 .dma_mask = &orion5x_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&orion5x_xor0_data,
 },
 };
@@ -512,7 +512,7 @@ static struct platform_device orion5x_xor1_channel = {
 .resource = orion5x_xor1_resources,
 .dev = {
 .dma_mask = &orion5x_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&orion5x_xor1_data,
 },
 };
......
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
 .resource = iop3xx_dma_0_resources,
 .dev = {
 .dma_mask = &iop3xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop3xx_dma_0_data,
 },
 };
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
 .resource = iop3xx_dma_1_resources,
 .dev = {
 .dma_mask = &iop3xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop3xx_dma_1_data,
 },
 };
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
 .resource = iop3xx_aau_resources,
 .dev = {
 .dma_mask = &iop3xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop3xx_aau_data,
 },
 };
......
@@ -349,7 +349,7 @@ static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 u64 sn_dma_get_required_mask(struct device *dev)
 {
-return DMA_64BIT_MASK;
+return DMA_BIT_MASK(64);
 }
 EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
......
@@ -644,7 +644,7 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
 static int dma_fixed_dma_supported(struct device *dev, u64 mask)
 {
-return mask == DMA_64BIT_MASK;
+return mask == DMA_BIT_MASK(64);
 }
 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
......
@@ -2405,8 +2405,8 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 int rc;
 if (using_dac &&
-!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
......
@@ -3913,8 +3913,8 @@ static int pci_go_64(struct pci_dev *pdev)
 {
 int rc;
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
......
@@ -584,8 +584,8 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
 if (have_64bit_bus &&
-!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
......
@@ -1297,8 +1297,8 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 host->iomap = iomap;
 /* configure and activate the device */
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
......
@@ -1372,8 +1372,8 @@ static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
 dma_addr_t CommandMailboxDMA;
 DAC960_V2_CommandStatus_T CommandStatus;
-if (!pci_set_dma_mask(Controller->PCIDevice, DMA_64BIT_MASK))
-Controller->BounceBufferLimit = DMA_64BIT_MASK;
+if (!pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64)))
+Controller->BounceBufferLimit = DMA_BIT_MASK(64);
 else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK))
 Controller->BounceBufferLimit = DMA_32BIT_MASK;
 else
......
@@ -3637,7 +3637,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 hba[i]->pdev = pdev;
 /* configure PCI DMA stuff */
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
 dac = 1;
 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
 dac = 0;
......
@@ -1586,9 +1586,9 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 goto err_out;
 #ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
-rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (!rc) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
 pci_name(pdev));
......
@@ -829,7 +829,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
 dev_printk(KERN_INFO, &dev->dev,
 "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
-if (pci_set_dma_mask(dev, DMA_64BIT_MASK) &&
+if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) &&
 pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
 dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
 return -ENOMEM;
......
@@ -98,13 +98,13 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 if (err)
 goto err_request_regions;
-err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err)
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 if (err)
 goto err_set_dma_mask;
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err)
 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (err)
......
@@ -178,7 +178,7 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl,
 static struct device dummy_dma_dev = {
 .init_name = "fallback device",
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .dma_mask = &dummy_dma_dev.coherent_dma_mask,
 };
......
@@ -989,7 +989,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 }
 if ((sizeof(dma_addr_t) > 4)) {
-ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
+ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
 if (ret < 0) {
 printk(KERN_ERR PFX "64b DMA configuration failed\n");
 goto bail2;
......
@@ -470,7 +470,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 goto bail_disable;
 }
-ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (ret) {
 /*
 * if the 64 bit setup fails, try 32 bit. Some systems
@@ -496,7 +496,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 }
 }
 else {
-ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (ret)
 dev_info(&pdev->dev,
 "Unable to set DMA consistent mask "
......
@@ -1016,7 +1016,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 pci_set_master(pdev);
-err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err) {
 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -1025,7 +1025,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 goto err_free_res;
 }
 }
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err) {
 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
 "consistent PCI DMA mask.\n");
......
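
Most of the PCI drivers touched above share the same probe-time idiom: request a 64-bit DMA mask first and fall back to 32-bit if the platform refuses it. The sketch below is a hypothetical helper, not part of this commit; it shows that idiom with the new macro, using the pci_set_dma_mask()/pci_set_consistent_dma_mask() API these drivers still used at the time. The 32-bit fallback is written as DMA_BIT_MASK(32) here for symmetry, while the hunks above keep the legacy DMA_32BIT_MASK, which was converted separately.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical example: negotiate DMA masks for a PCI device,
 * preferring 64-bit addressing and falling back to 32-bit. */
static int example_set_dma_masks(struct pci_dev *pdev)
{
	int rc;

	/* Streaming (map/unmap) mask. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;	/* no usable DMA addressing at all */

	/* Coherent (consistent) allocation mask. */
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	return rc;
}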