Need help adding a 64-bit MMAP device in dtb - linux-kernel

Could someone help me enable a device with 64-bit register mmap in an ARM64 Linux kernel dtb?
In my platform's Linux kernel device driver, (*res)->start ends up with only the first cell of a 'reg' entry and resource_size(*res) ends up with the second cell.
dev: dev@8000000000 {
#address-cells = <2>;
#size-cells = <1>;
compatible = "nxp,dev-1.0";
reg-names = "dev0", "dev1";
reg = <0x00000080 0x00000000 0x01FFFFFF>,//Inst0 0x80_0000_0000 to 0x80_1FFF_FFFF
<0x00000080 0x50000000 0x01FFFFFF>;//Inst1 0x80_5000_0000 to 0x80_6FFF_FFFF
interrupts = <0 170 0>;
interrupt-parent = <&intc>;
};
Driver:
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
dev_err(device, "resource %s not found!\n", name);
return -ENODEV;
}
printk("sizeof((*res)->start) %d \n", sizeof((*res)->start));
printk("sizeof((*res)->end) %d \n", sizeof((*res)->end));
printk("\n\nNAME %s, (*res)->start 0x%llx, resource_size(*res) 0x%llx dev_name(device) %s \n\n", name, (*res)->start, resource_size(*res), dev_name(device));
region = devm_request_mem_region(device, (*res)->start,
resource_size(*res), dev_name(device));
Log:
sizeof((*res)->start) 8
sizeof((*res)->end) 8
NAME dev0, (*res)->start 0x80, resource_size(*res) 0x0 dev_name(device) 80.dev
80.dev: unable to request dev0
request_and_map() failed for dev0
sizeof((*res)->start) 8
sizeof((*res)->end) 8
NAME dev1, (*res)->start 0x1ffffff, resource_size(*res) 0x80 dev_name(device) 80.dev
80.dev: unable to request dev1
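For what it's worth, the #address-cells/#size-cells properties inside a node describe that node's children; the node's own reg is parsed with the values of the parent bus. That matches the log above: with a 1-cell/1-cell parent, <0x00000080 0x00000000 0x01FFFFFF> is split into start 0x80 and size 0x0. So either the reg entries have to be written for the parent's cell counts, or the parent bus has to declare #address-cells = <2> and #size-cells = <1> (and be able to translate those addresses upward) for these entries to parse as intended. On the driver side, a minimal probe mapping both named regions could look like the sketch below; the function is illustrative, not the poster's actual driver, and devm_ioremap_resource() also performs the request_mem_region() step and rejects a NULL resource.
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

/* Sketch only: fetch and map the two named regions from the dtb. */
static int dev_probe(struct platform_device *pdev)
{
    struct resource *res;
    void __iomem *base0, *base1;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dev0");
    base0 = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base0))
        return PTR_ERR(base0);

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dev1");
    base1 = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base1))
        return PTR_ERR(base1);

    dev_info(&pdev->dev, "dev0 and dev1 regions mapped\n");
    return 0;
}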

Related

How can I use the 'no-map' property of reserved-memory in device tree? Still accessed with virtual address

I have this device tree.
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
axpu_reserved_mem: axpursvd@90000000 {
no-map;
reg = <0x0 0x90000000 0x0 0x30000000>;
};
};
axpu@50000000 {
compatible = "ab21-axpu";
reg = <0 0x50000000 0 0x10000000>;
...
memory-region = <&axpu_reserved_mem>;
};
As a simple test, I did something like this (reduced) in the probe function.
struct axpu_dev {
struct device *dev;
void __iomem *base;
u64 paddr;
u64 vaddr;
};
static int axpu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct axpu_dev *axpu;
struct device_node *np;
int rc;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
axpu = devm_kzalloc(dev, sizeof(*axpu), GFP_KERNEL);
axpu->dev = dev;
axpu->base = devm_ioremap(dev, res->start, resource_size(res));
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
devm_request_irq(dev, res->start, axpu_irq_handler, IRQF_TRIGGER_HIGH, "axpu_irq", axpu);
np = of_parse_phandle(dev->of_node, "memory-region", 0);
rc = of_address_to_resource(np, 0, res);
axpu->paddr = res->start;
axpu->vaddr = memremap(res->start, resource_size(res), MEMREMAP_WB);
platform_set_drvdata(pdev, axpu);
axpu_init(axpu);
return sysfs_create_group(&dev->kobj, &axpu_attr_group);
}
and in the axpu_init function, I access it through the virtual address.
static void axpu_init(struct axpu_dev *axpu)
{
printk("testing reserved memory ..\n");
writel_relaxed(0x12345678, axpu->vaddr + 0);
writel_relaxed(0x23456789, axpu->vaddr + 8);
printk("read-back data = %llx, %llx\n", readl_relaxed(axpu->vaddr + 0), readl_relaxed(axpu->vaddr + 8));
}
When I execute it, it runs ok.
/ # insmod axpu.ko
axpu_probe called!
MEM : res->start = 50000000, res->end = 5fffffff, res->name = axpu@50000000
axpu->base = ffffffc010000000
IRQ : res->start = 15, res->end = 15, res->name = axpu@50000000
axpu_dev 50000000.axpu: Allocated reserved memory, vaddr: 0xFFFFFFC080000000, paddr: 0x90000000
writing 0x12345678 at non-mapped reserved memory 0x90000000, 0x90000008 ..
read-back data = 12345678, 23456789
I have put no-map in the reserved-memory sub-node, but it is still accessible through the virtual address.
But if I try it with the physical address, it crashes as below.
------------[ cut here ]------------
Ignoring spurious kernel translation fault at virtual address 0000000090000000
WARNING: CPU: 0 PID: 27 at arch/arm64/mm/fault.c:311 __do_kernel_fault+0x108/0x150
Modules linked in: axpu(+)
CPU: 0 PID: 27 Comm: insmod Not tainted 5.10.0-rc5 #548
Hardware name: ETRI ab21m (DT)
pstate: 60400009 (nZCv daif +PAN -UAO -TCO BTYPE=--)
pc : __do_kernel_fault+0x108/0x150
lr : __do_kernel_fault+0x108/0x150
The documentation says this about no-map:
no-map (optional) - empty property
- Indicates the operating system must not create a virtual mapping
of the region as part of its standard mapping of system memory,
nor permit speculative access to it under any circumstances other
than under the control of the device driver using the region.
Then what is wrong with my device tree?
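For what it's worth: no-map only tells the OS not to include the region in its standard (linear) mapping of system memory, and the quoted text explicitly allows access "under the control of the device driver using the region". memremap() creates exactly such a driver-controlled mapping, so reads and writes through the pointer it returns are expected to work. The crash in the second case happens because 0x90000000 is a physical address, not a kernel virtual address, so dereferencing it directly faults. A reduced sketch of the mapping pattern (node names taken from the question, error handling trimmed):
#include <linux/device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Sketch: map the no-map reserved region referenced by memory-region. */
static void *map_axpu_reserved_mem(struct device *dev)
{
    struct device_node *np;
    struct resource rmem;
    int ret;

    np = of_parse_phandle(dev->of_node, "memory-region", 0);
    if (!np)
        return NULL;
    ret = of_address_to_resource(np, 0, &rmem);
    of_node_put(np);
    if (ret)
        return NULL;

    /* Driver-private, cacheable mapping of the region; use the returned
     * pointer for access -- rmem.start itself is only a physical address. */
    return memremap(rmem.start, resource_size(&rmem), MEMREMAP_WB);
}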

How to reserve physical memory in kernel (arm64)

I want to reserve some memory to save kernel information. I copied the reserve_crashkernel function to arm64 and modified it:
/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN (16 << 20)
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM
};
static void __init reserve_crashkernel(void)
{
unsigned long long crash_size, crash_base, total_mem;
int ret;
crash_size = CRASH_ALIGN;
total_mem = memblock_phys_mem_size();
pr_info("crashkernel find memory %x - %llx.\n", CRASH_ALIGN, memblock_end_of_DRAM());
crash_base = memblock_find_in_range(CRASH_ALIGN, memblock_end_of_DRAM(),
crash_size, CRASH_ALIGN);
if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
return;
}
ret = memblock_reserve(crash_base, crash_size);
if (ret) {
pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
return;
}
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
(unsigned long)(crash_size >> 20),
(unsigned long)(crash_base >> 20),
(unsigned long)(total_mem >> 20));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
insert_resource(&iomem_resource, &crashk_res);
}
When the kernel starts, I can find a kernel print like this:
[ 0.000000] crashkernel find memory 1000000 - 210000000.
[ 0.000000] Reserving 16MB of memory at 8272MB for crashkernel (System RAM: 8190MB)
But /proc/iomem doesn't look right. Without my code there is a 'System RAM' region:
100000000-20fffffff : System RAM
Now with reserve_crashkernel, the region changed to:
205000000-205ffffff : Crash kernel
I don't know why the 'System RAM' region disappeared, and I'm not sure my code is correct.
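A likely explanation (a guess based on the symptoms, not verified against this exact kernel): insert_resource() here adds 'Crash kernel' as a top-level entry in iomem_resource before the arch code registers its 'System RAM' ranges; when request_standard_resources() later calls request_resource() for the RAM range that contains it, that call fails because it conflicts with the already-present entry, so the 'System RAM' line never appears. Mainline arm64 avoids this by registering the System RAM resources first and then nesting crashk_res inside the RAM region that contains it. A simplified sketch of that nesting (ignores resource locking; crashk_res is the structure defined above):
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/string.h>

/* Sketch: run after the System RAM resources exist and nest the crash
 * kernel range under the RAM resource that contains it. */
static void __init nest_crashkernel_resource(void)
{
    struct resource *ram;

    for (ram = iomem_resource.child; ram; ram = ram->sibling) {
        if (strcmp(ram->name, "System RAM"))
            continue;
        if (crashk_res.start >= ram->start && crashk_res.end <= ram->end) {
            request_resource(ram, &crashk_res);
            break;
        }
    }
}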

On PowerPC P2040/e500mc, the LD instruction EA causes a kernel panic during PCI card pull-out

Everything I have read so far points to the fact that accessing PCI address space during a card pull-out will cause a kernel panic if it is not handled in the kernel's machine_check_handler. The machine_check_handler for e500mc looks for the EA (effective address) of the instruction in the MCSRR0 register and compares it against the PCI address space. However, since this address (EA) was not in PCI address space, it eventually caused the kernel panic: it could not be handled in the machine check interrupt handler because the address the CPU stored in MCSRR0 was some bad address.
Although the GPRs all point to PCI address space BAR addresses from previous CPU instructions, the effective address stored in the MCSRR0 register is the same invalid physical address that the NIP is pointing to...
MCSRR1 holds the machine state (MSR) at the point of the interrupt and shows the LD|GLD bits set along with the MCSRR1[RI] bit, so it is a recoverable synchronous interrupt.
And since the CPU access was to an external hot-plugged device, we need not crash the system even if the device is not present; hence the kernel check and safe return from the interrupt.
I have a few questions regarding this issue:
Which GPRs are used to determine the effective address of the LD instruction? The LD bit is set in the MCSR register. How do I tell which addressing mode was used to generate the effective address for the LD instruction?
The LD instruction uses rD, rA, rB operands; how do I find which EA calculation mode is being used by the processor? Apparently there are four of them. Also, which GPRs does each of these operands point to? I couldn't figure it out from the E500MCRM or the PowerPC EREF. (See the sketch after these questions.)
Since we are writing to PCI address space from user space, the PCI device registers are mapped into some virtual address space of the process we are writing from. This is a non-cached mapping as far as I know.
Does the CPU's address translation to the PCI device's physical address result in a bad address because the PCI device is no longer connected? My assumption was that, since the device is no longer present, the effective address returned was some junk value that caused this kernel panic. I am not sure if that's how the CPU works.
Any suggestions that help my understanding are welcome; this is way deep down and beyond my expertise. I have gone through the E500MCRM, P2040RM and PowerPC EREF, but I cannot figure out why I am getting a bad address instead of a PCI physical address as the effective address.
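On the EA question: rD is only the destination register and plays no part in the address calculation. For the integer loads handled by the code below, D-form loads (lwz, lbz, lhz and their update forms) compute EA = (rA|0) + d, and the X-form loads under opcode 31 (lwzx, lbzx, lhzx, ...) compute EA = (rA|0) + rB, where (rA|0) means the value 0 when the rA field is 0. A hedged sketch of recovering the EA from the trapped instruction and the saved GPRs, reusing the decode helpers that fsl_pci.c already uses (the helper name is made up):
#include <linux/types.h>
#include <asm/disassemble.h>    /* get_op()/get_ra()/get_rb()/get_d(), as in fsl_pci.c */
#include <asm/ptrace.h>

/* Hypothetical helper: effective address of a faulting integer load. */
static unsigned long load_effective_address(struct pt_regs *regs, u32 inst)
{
    unsigned int ra = get_ra(inst);
    unsigned long base = ra ? regs->gpr[ra] : 0;    /* (rA|0): field 0 means literal 0 */

    if (get_op(inst) == 31)                         /* X-form: lwzx, lbzx, lhzx, ... */
        return base + regs->gpr[get_rb(inst)];

    return base + (s16)get_d(inst);                 /* D-form: lwz, lbz, lhz, ... */
}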
Kernel crash dump:
fujitsu:~$ fsl_pci_mcheck_exception-> SPRN_MCAR: 0x0
fsl_pci_mcheck_exception-> SPRN_MCSRR0: 0x0f6fec68
fsl_pci_mcheck_exception-> SPRN_MCSRR1: 0x2d002
fsl_pci_mcheck_exception-> SPRN_MCAR: 0x0
fsl_pci_mcheck_exception-> SPRN_DEAR: 0x0
fsl_pci_mcheck_exception-> current->pid: [8333]
fsl_pci_mcheck_exception-> after __get_user_inatomic(inst, &regs->nip): 0x0f6fec68(inst), 0x0f6fec68(regs->nip), 0x0(ret)
Machine check in kernel mode.
Caused by (from MCSR=a000): Load Error Report
Guarded Load Error Report
Oops: Machine check, sig: 7 [#1]
PREEMPT SMP NR_CPUS=4 P2041 RDB
Modules linked in: i2cBridge(O) interruptDriver_pb(O) cma_alloc(O) hwtp_drv(O) interruptDriver_wdt(O)
NIP: 0f6fec68 LR: 0f6fec4c CTR: 0f6faad4
REGS: e4ec5f10 TRAP: 0204 Tainted: G O (3.8.13-rt9+)
MSR: 0002d002 <CE,EE,PR,ME> CR: 40044442 XER: 20000000
TASK = e57dc020[8333] 'RxManager' THREAD: e4ec4000 CPU: 3
GPR00: 0f6fec4c 52afea90 52b06910 50400000 52afeb50 00000003 a0105210 52afebfc
GPR08: a1ffffff a0000000 0000000c a0000000 20044448 1032e800 52900000 00000006
GPR16: 0f74f434 0f729d20 135a78a0 00200000 0fe28280 52aff4b0 00000000 0fe2a6c8
GPR24: 52afec98 0f6cd268 135a7630 00105210 52afebfc 50400000 0f71d31c 00000003
NIP [0f6fec68] 0xf6fec68
LR [0f6fec4c] 0xf6fec4c
Call Trace:
---[ end trace 2715d0da39427f69 ]---
Here's the code from fsl_pci.c that gets called from the machine_check_handler:
#ifdef CONFIG_E500
static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
{
unsigned int rd, ra, rb, d;
rd = get_rt(inst);
ra = get_ra(inst);
rb = get_rb(inst);
d = get_d(inst);
printk(KERN_EMERG "%s==> rd==0x%x, ra=0x%x, rb=0x%x, d=0x%x\n", __FUNCTION__, rd, ra, rb, d);
printk(KERN_EMERG "%s==> get_op(inst) = 0x%x\n", __FUNCTION__, get_op(inst));
return 1;
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_LWZX:
case OP_31_XOP_LWBRX:
regs->gpr[rd] = 0xffffffff;
break;
case OP_31_XOP_LWZUX:
regs->gpr[rd] = 0xffffffff;
regs->gpr[ra] += regs->gpr[rb];
break;
case OP_31_XOP_LBZX:
regs->gpr[rd] = 0xff;
break;
case OP_31_XOP_LBZUX:
regs->gpr[rd] = 0xff;
regs->gpr[ra] += regs->gpr[rb];
break;
case OP_31_XOP_LHZX:
case OP_31_XOP_LHBRX:
regs->gpr[rd] = 0xffff;
break;
case OP_31_XOP_LHZUX:
regs->gpr[rd] = 0xffff;
regs->gpr[ra] += regs->gpr[rb];
break;
default:
return 0;
}
break;
case OP_LWZ:
regs->gpr[rd] = 0xffffffff;
break;
case OP_LWZU:
regs->gpr[rd] = 0xffffffff;
regs->gpr[ra] += (s16)d;
break;
case OP_LBZ:
regs->gpr[rd] = 0xff;
break;
case OP_LBZU:
regs->gpr[rd] = 0xff;
regs->gpr[ra] += (s16)d;
break;
case OP_LHZ:
regs->gpr[rd] = 0xffff;
break;
case OP_LHZU:
regs->gpr[rd] = 0xffff;
regs->gpr[ra] += (s16)d;
break;
default:
return 0;
}
return 1;
}
static int is_in_pci_mem_space(phys_addr_t addr)
{
struct pci_controller *hose;
struct resource *res;
int i;
list_for_each_entry(hose, &hose_list, list_node) {
if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
continue;
for (i = 0; i < 3; i++) {
res = &hose->mem_resources[i];
if ((res->flags & IORESOURCE_MEM) &&
addr >= res->start && addr <= res->end)
printk(KERN_EMERG "%s ==> returning from checking addresses\n", __FUNCTION__);
return 1;
}
}
printk(KERN_EMERG "%s ==> returning without checking addresses\n", __FUNCTION__);
return 1;
}
int fsl_pci_mcheck_exception(struct pt_regs *regs)
{
u32 inst;
int ret;
phys_addr_t addr = 0;
/* Let KVM/QEMU deal with the exception */
if (regs->msr & MSR_GS)
return 0;
#ifdef CONFIG_PHYS_64BIT
addr = mfspr(SPRN_MCARU);
addr <<= 32;
#endif
addr += mfspr(SPRN_MCSRR0);
printk(KERN_EMERG "%s-> SPRN_MCAR: 0x%x\n", __FUNCTION__, addr);
printk(KERN_EMERG "%s-> SPRN_MCSRR0: 0x%x\n", __FUNCTION__, mfspr(SPRN_MCSRR0));
printk(KERN_EMERG "%s-> SPRN_MCSRR1: 0x%x\n", __FUNCTION__, mfspr(SPRN_MCSRR1));
printk(KERN_EMERG "%s-> current->pid: 0x%x\n", __FUNCTION__, current->pid);
#ifdef CONFIG_E500
if (mfspr(SPRN_EPCR) & SPRN_EPCR_ICM)
addr = PFN_PHYS(vmalloc_to_pfn((void *)mfspr(SPRN_DEAR)));
printk(KERN_EMERG "%s-> SPRN_DEAR: 0x%x\n", __FUNCTION__, addr);
#endif
printk(KERN_EMERG "%s-> before get_user: 0x%x, 0x%x\n", __FUNCTION__, regs->nip, inst);
if (is_in_pci_mem_space(addr)) {
if (user_mode(regs)) {
pagefault_disable();
/* I am using __get_user_inatomic to get the instruction from the user
space as any other get_user versions were resulting in -EFAULT as they can
sleep and this needs to be called from user context and we are in interrupt
context.
*/
ret = __get_user_inatomic(inst, &regs->nip);
pagefault_enable();
} else {
ret = probe_kernel_address(regs->nip, inst);
}
printk(KERN_EMERG "%s-> after get_user: 0x%x, 0x%x, 0x%d\n", __FUNCTION__, regs->nip, inst, ret);
if (mcheck_handle_load(regs, inst)) {
regs->nip += 4;
printk(KERN_EMERG "%s-> after mcheck_handle load: 0x%x, 0x%x\n", __FUNCTION__, regs->nip, inst);
return 1;
}
}
return 0;
}
#endif
Here's the code I added to fix the kernel panic. It looks like regs->gpr[0] is the destination register of the LD instruction, and incrementing the instruction pointer took care of returning from the interrupt context cleanly. I still have the issue of verifying that this interrupt originated from an access to a PCI device address. Right now I have commented out the PCI address range check, and without this check I am able to access any address without crashing the system, which is even worse.
Yes, even a null-pointer access doesn't crash the system anymore. I tried it with devmem2 and accessed a null pointer, and the call goes through the interrupt and returns safely after dumping the logs from the interrupt handler.
regs->gpr[0] = 0xffffffff;
regs->nip += 4;
return 1;
/* inserted just before the existing call: if (mcheck_handle_load(regs, inst)) { */

Identify two PHY Ethernet controllers on MII bus in u-boot, DTS or kernel driver?

The issue I am looking for a solution to is that I have to upgrade the Ethernet controller on my motherboard because the component is going obsolete.
The board is running u-boot and kernel 2.6 with a device tree.
I have to mention that on the new motherboard the new Ethernet chip sits at a different address location than before:
phy-handle = <&PHY1>; (old address register)
phy-handle = <&PHY0>; (new address register)
dts file:
mdio@e00 {
compatible = "fsl,mpc885-fec-mdio", "fsl,pq1-fec-mdio";
reg = <0xe00 0x188>;
#address-cells = <1>;
#size-cells = <0>;
PHY0: ethernet-phy@0 {
reg = <0x0>;
device_type = "ethernet-phy";
};
PHY1: ethernet-phy@1 {
reg = <0x1>;
device_type = "ethernet-phy";
};
PHY2: ethernet-phy@2 {
reg = <0x2>;
device_type = "ethernet-phy";
};
...
EMAC0: ethernet@1e00 {
device_type = "network";
compatible = "fsl,mpc885-fec-enet",
"fsl,pq1-fec-enet";
reg = <0x1e00 0x188>;
mac-address = [ 00 00 00 00 00 00 ];
interrupts = <7 1>;
interrupt-parent = <&PIC>;
phy-handle = <&PHY0>;
linux,network-index = <0>;
};
The constraint is that I do not want to have two separate builds, one for each board, but a smart piece of code that identifies which Ethernet controller is present on the motherboard.
Now my question is: where should this identification code be added? In u-boot, in the device tree file, or in the kernel Ethernet driver?
What I have noticed during debugging is that, compared to the kernel, which ONLY loads the driver specified by the device tree, u-boot is smart enough to scan the MII bus and identify the Ethernet controller.
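One hedged option, building on that observation: do the detection in u-boot, in a board fixup, and patch phy-handle in the DTB before it is passed to the kernel, so a single build covers both boards and neither the device tree source nor the kernel driver needs to know about the variant. A rough sketch; the MII device name, node paths and exact APIs depend on the board and u-boot version, so treat everything below as placeholders:
#include <common.h>
#include <miiphy.h>
#include <libfdt.h>

/* Sketch for a board fixup (e.g. called from ft_board_setup()): probe PHY
 * address 0 and point the MAC's phy-handle at PHY0 if something answers,
 * otherwise leave it pointing at PHY1. */
void fixup_phy_handle(void *blob)
{
    unsigned short id1 = 0xffff;
    const char *phy_path;
    int mac, phy;

    /* Register 2 is the PHY ID1 register; "FEC" is a placeholder name. */
    miiphy_read("FEC", 0, 2, &id1);
    phy_path = (id1 != 0xffff && id1 != 0x0000) ?
               "/soc/mdio@e00/ethernet-phy@0" :   /* hypothetical full paths */
               "/soc/mdio@e00/ethernet-phy@1";

    mac = fdt_path_offset(blob, "/soc/ethernet@1e00");
    phy = fdt_path_offset(blob, phy_path);
    if (mac < 0 || phy < 0)
        return;

    fdt_setprop_cell(blob, mac, "phy-handle", fdt_get_phandle(blob, phy));
}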
thanks,
regards

Linux driver port from 2.6.19.2 to 2.6.38-rc2, ARM11 iMX31, AMBA MBX device, LogicPD Litekit GLES driver

Code, followed by the question:
#define MBX_REG_SYS_PHYS_BASE 0xC0000000
#define MBX_REG_RANGE 0x00004000
static struct resource mxc_reg_resources[] = {
{
.start = MBX_REG_SYS_PHYS_BASE,
.end = MBX_REG_SYS_PHYS_BASE + MBX_REG_RANGE - 1,
.flags = IORESOURCE_MEM }
};
mbx_reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mbx_reg)
return -EINVAL;
reg_base = ioremap(mbx_reg->start, resource_size(mbx_reg));
if (!reg_base) {
ret = -ENOMEM;
goto eremap;
}
printk(KERN_CRIT "Address: from 0x%08X to 0x%08X\n",
mbx_reg->start, reg_base);
regread = mx3reg_read_reg(mx3reg, MBX1_GLOBREG_REVISION);
printk(KERN_CRIT "MBX1_GLOBREG_REVISION: 0x%.8X\n", regread);
This code works on the iMX31 from LogicPD using 2.6.19.2 with out-of-tree patching from Freescale.
When porting it to 2.6.38-rc2, it no longer works.
Here are some results:
Working results:
Address: 0xC7860000
MBX1_GLOBREG_REVISION: 0x01010200
Failed results:
Address: 0xC48A0000
MBX1_GLOBREG_REVISION: 0x00000000
Address: 0xC48A8000
MBX1_GLOBREG_REVISION: 0x00000000
Address: 0xC48B8000
MBX1_GLOBREG_REVISION: 0x00000000
Address: 0xC48C0000
MBX1_GLOBREG_REVISION: 0x00000000
Maybe interesting: on 2.6.19.2 it always gets the same address mapped, yet on 2.6.38-rc2 it does not.
Are you sure your defines are still good? The output of this line should not change:
printk(KERN_CRIT "Address: from 0x%08X to 0x%08X\n",
mbx_reg->start, reg_base);
since it is a physical address. However, it is not printed in your output.
Check that the peripheral you are accessing is clocked.
In order to have this device ready to communicate, you need to set up the Peripheral Port Remap register:
/* Setup Peripheral Port Remap register for AVIC */
asm("ldr r0, =0xC0000015 \n\
mcr p15, 0, r0, c15, c2, 4");
This is the code from the original 2.6.19.2 kernel, executed from a board fixup routine.
And of course the clocks would have to be enabled as well; this driver example is not showing that either.
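For reference, a hedged sketch of what enabling the block's clock could look like on a 2.6.38-era i.MX31 kernel; the clock name "mbx" is a guess and has to be checked against the platform's clock table:
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch: enable the peripheral clock before touching its registers. */
static struct clk *mbx_clk;

static int mbx_enable_clock(struct platform_device *pdev)
{
    mbx_clk = clk_get(&pdev->dev, "mbx");   /* "mbx" is a hypothetical clock name */
    if (IS_ERR(mbx_clk))
        return PTR_ERR(mbx_clk);
    return clk_enable(mbx_clk);             /* 2.6.38 predates clk_prepare() */
}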
