@@ -641,187 +641,6 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
spin_unlock_irqrestore(&data->lock, flags);
}
-static const struct iommu_ops exynos_iommu_ops;
-
-static int exynos_sysmmu_probe(struct platform_device *pdev)
-{
- int irq, ret;
- struct device *dev = &pdev->dev;
- struct sysmmu_drvdata *data;
- struct resource *res;
-
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->sfrbase = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->sfrbase))
- return PTR_ERR(data->sfrbase);
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return irq;
-
- ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
- dev_name(dev), data);
- if (ret) {
- dev_err(dev, "Unabled to register handler of irq %d\n", irq);
- return ret;
- }
-
- data->clk = devm_clk_get(dev, "sysmmu");
- if (PTR_ERR(data->clk) == -ENOENT)
- data->clk = NULL;
- else if (IS_ERR(data->clk))
- return PTR_ERR(data->clk);
-
- data->aclk = devm_clk_get(dev, "aclk");
- if (PTR_ERR(data->aclk) == -ENOENT)
- data->aclk = NULL;
- else if (IS_ERR(data->aclk))
- return PTR_ERR(data->aclk);
-
- data->pclk = devm_clk_get(dev, "pclk");
- if (PTR_ERR(data->pclk) == -ENOENT)
- data->pclk = NULL;
- else if (IS_ERR(data->pclk))
- return PTR_ERR(data->pclk);
-
- if (!data->clk && (!data->aclk || !data->pclk)) {
- dev_err(dev, "Failed to get device clock(s)!\n");
- return -ENOENT;
- }
-
- data->clk_master = devm_clk_get(dev, "master");
- if (PTR_ERR(data->clk_master) == -ENOENT)
- data->clk_master = NULL;
- else if (IS_ERR(data->clk_master))
- return PTR_ERR(data->clk_master);
-
- data->sysmmu = dev;
- spin_lock_init(&data->lock);
-
- __sysmmu_get_version(data);
-
- ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
- dev_name(data->sysmmu));
- if (ret)
- return ret;
-
- ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
- if (ret)
- goto err_iommu_register;
-
- platform_set_drvdata(pdev, data);
-
- if (PG_ENT_SHIFT < 0) {
- if (MMU_MAJ_VER(data->version) < 5) {
- PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
- LV1_PROT = SYSMMU_LV1_PROT;
- LV2_PROT = SYSMMU_LV2_PROT;
- } else {
- PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
- LV1_PROT = SYSMMU_V5_LV1_PROT;
- LV2_PROT = SYSMMU_V5_LV2_PROT;
- }
- }
-
- if (MMU_MAJ_VER(data->version) >= 5) {
- ret = dma_set_mask(dev, DMA_BIT_MASK(36));
- if (ret) {
- dev_err(dev, "Unable to set DMA mask: %d\n", ret);
- goto err_dma_set_mask;
- }
- }
-
- /*
- * use the first registered sysmmu device for performing
- * dma mapping operations on iommu page tables (cpu cache flush)
- */
- if (!dma_dev)
- dma_dev = &pdev->dev;
-
- pm_runtime_enable(dev);
-
- return 0;
-
-err_dma_set_mask:
- iommu_device_unregister(&data->iommu);
-err_iommu_register:
- iommu_device_sysfs_remove(&data->iommu);
- return ret;
-}
-
-static void exynos_sysmmu_shutdown(struct platform_device *pdev)
-{
- struct sysmmu_drvdata *data = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- int irq = platform_get_irq(pdev, 0);
-
- devm_free_irq(dev, irq, data);
- pm_runtime_force_suspend(dev);
-}
-
-static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
-{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev);
- struct device *master = data->master;
-
- if (master) {
- struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
-
- mutex_lock(&owner->rpm_lock);
- if (data->domain) {
- dev_dbg(data->sysmmu, "saving state\n");
- __sysmmu_disable(data);
- }
- mutex_unlock(&owner->rpm_lock);
- }
- return 0;
-}
-
-static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
-{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev);
- struct device *master = data->master;
-
- if (master) {
- struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
-
- mutex_lock(&owner->rpm_lock);
- if (data->domain) {
- dev_dbg(data->sysmmu, "restoring state\n");
- __sysmmu_enable(data);
- }
- mutex_unlock(&owner->rpm_lock);
- }
- return 0;
-}
-
-static const struct dev_pm_ops sysmmu_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
-
-static const struct of_device_id sysmmu_of_match[] = {
- { .compatible = "samsung,exynos-sysmmu", },
- { },
-};
-MODULE_DEVICE_TABLE(of, sysmmu_of_match);
-
-static struct platform_driver exynos_sysmmu_driver = {
- .probe = exynos_sysmmu_probe,
- .shutdown = exynos_sysmmu_shutdown,
- .driver = {
- .name = "exynos-sysmmu",
- .of_match_table = sysmmu_of_match,
- .pm = &sysmmu_pm_ops,
- .suppress_bind_attrs = true,
- }
-};
-
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
@@ -1428,6 +1247,185 @@ static const struct iommu_ops exynos_iommu_ops = {
}
};
+static int exynos_sysmmu_probe(struct platform_device *pdev)
+{
+ int irq, ret;
+ struct device *dev = &pdev->dev;
+ struct sysmmu_drvdata *data;
+ struct resource *res;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->sfrbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->sfrbase))
+ return PTR_ERR(data->sfrbase);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, dev_name(dev),
+ data);
+ if (ret) {
+ dev_err(dev, "Unabled to register handler of irq %d\n", irq);
+ return ret;
+ }
+
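+ /*
+ * All clocks are optional: -ENOENT from devm_clk_get() just means the
+ * clock is not specified in DT, which the check below accounts for by
+ * requiring either "sysmmu" or both "aclk" and "pclk".
+ */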
+ data->clk = devm_clk_get(dev, "sysmmu");
+ if (PTR_ERR(data->clk) == -ENOENT)
+ data->clk = NULL;
+ else if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ data->aclk = devm_clk_get(dev, "aclk");
+ if (PTR_ERR(data->aclk) == -ENOENT)
+ data->aclk = NULL;
+ else if (IS_ERR(data->aclk))
+ return PTR_ERR(data->aclk);
+
+ data->pclk = devm_clk_get(dev, "pclk");
+ if (PTR_ERR(data->pclk) == -ENOENT)
+ data->pclk = NULL;
+ else if (IS_ERR(data->pclk))
+ return PTR_ERR(data->pclk);
+
+ if (!data->clk && (!data->aclk || !data->pclk)) {
+ dev_err(dev, "Failed to get device clock(s)!\n");
+ return -ENOENT;
+ }
+
+ data->clk_master = devm_clk_get(dev, "master");
+ if (PTR_ERR(data->clk_master) == -ENOENT)
+ data->clk_master = NULL;
+ else if (IS_ERR(data->clk_master))
+ return PTR_ERR(data->clk_master);
+
+ data->sysmmu = dev;
+ spin_lock_init(&data->lock);
+
+ __sysmmu_get_version(data);
+
+ ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+ dev_name(data->sysmmu));
+ if (ret)
+ return ret;
+
+ ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
+ if (ret)
+ goto err_iommu_register;
+
+ platform_set_drvdata(pdev, data);
+
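+ /*
+ * The page-table entry layout is global state, configured only once
+ * from the version of the first SysMMU instance that probes.
+ */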
+ if (PG_ENT_SHIFT < 0) {
+ if (MMU_MAJ_VER(data->version) < 5) {
+ PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
+ LV1_PROT = SYSMMU_LV1_PROT;
+ LV2_PROT = SYSMMU_LV2_PROT;
+ } else {
+ PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
+ LV1_PROT = SYSMMU_V5_LV1_PROT;
+ LV2_PROT = SYSMMU_V5_LV2_PROT;
+ }
+ }
+
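+ /* SysMMU v5 and newer can address 36 bits of physical memory */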
+ if (MMU_MAJ_VER(data->version) >= 5) {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(36));
+ if (ret) {
+ dev_err(dev, "Unable to set DMA mask: %d\n", ret);
+ goto err_dma_set_mask;
+ }
+ }
+
+ /*
+ * Use the first registered SysMMU device for performing DMA mapping
+ * operations on IOMMU page tables (CPU cache flushes).
+ */
+ if (!dma_dev)
+ dma_dev = &pdev->dev;
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_dma_set_mask:
+ iommu_device_unregister(&data->iommu);
+err_iommu_register:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
+}
+
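+/*
+ * Free the IRQ before forcing suspend, so the handler cannot run
+ * against an already powered-down SysMMU during shutdown.
+ */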
+static void exynos_sysmmu_shutdown(struct platform_device *pdev)
+{
+ struct sysmmu_drvdata *data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int irq = platform_get_irq(pdev, 0);
+
+ devm_free_irq(dev, irq, data);
+ pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+ struct device *master = data->master;
+
+ if (master) {
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
+
+ mutex_lock(&owner->rpm_lock);
+ if (data->domain) {
+ dev_dbg(data->sysmmu, "saving state\n");
+ __sysmmu_disable(data);
+ }
+ mutex_unlock(&owner->rpm_lock);
+ }
+ return 0;
+}
+
+static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+ struct device *master = data->master;
+
+ if (master) {
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
+
+ mutex_lock(&owner->rpm_lock);
+ if (data->domain) {
+ dev_dbg(data->sysmmu, "restoring state\n");
+ __sysmmu_enable(data);
+ }
+ mutex_unlock(&owner->rpm_lock);
+ }
+ return 0;
+}
+
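+/*
+ * System sleep reuses the runtime PM callbacks via
+ * pm_runtime_force_suspend()/pm_runtime_force_resume().
+ */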
+static const struct dev_pm_ops sysmmu_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id sysmmu_of_match[] = {
+ { .compatible = "samsung,exynos-sysmmu", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sysmmu_of_match);
+
+static struct platform_driver exynos_sysmmu_driver = {
+ .probe = exynos_sysmmu_probe,
+ .shutdown = exynos_sysmmu_shutdown,
+ .driver = {
+ .name = "exynos-sysmmu",
+ .of_match_table = sysmmu_of_match,
+ .pm = &sysmmu_pm_ops,
+ .suppress_bind_attrs = true,
+ }
+};
+
static int __init exynos_iommu_init(void)
{
struct device_node *np;
@@ -1461,6 +1459,7 @@ static int __init exynos_iommu_init(void)
}
return 0;
+
err_zero_lv2:
platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver: