[POWERPC] Change rheap functions to use ulongs instead of pointers
The rheap allocation functions return a pointer, but the actual value is based on how the heap was initialized and so can be anything, e.g. an offset into a buffer. An unsigned long is a better representation of the value returned by the allocation functions.

This patch changes all of the relevant rheap functions to use unsigned long integers instead of pointers. In case of an error, the value returned is a negative error code that has been cast to an unsigned long; the caller can use the IS_ERR_VALUE() macro to check for this. All code that calls the rheap functions is updated accordingly. The macros IS_MURAM_ERR() and IS_DPERR() have been deleted in favor of IS_ERR_VALUE(). Error checking has also been added to rh_attach_region().

Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
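To illustrate the new calling convention, here is a minimal caller-side sketch, not part of the patch itself. The function name is hypothetical; the helpers and constants are the ones touched by the ucc_slow.c hunks below.

/* Hypothetical caller sketch -- follows the same pattern as ucc_slow.c. */
static int example_muram_user(void)
{
	unsigned long offset;
	void *pram;

	/* qe_muram_alloc() now returns an offset into MURAM, or a negative
	 * errno cast to unsigned long on failure. */
	offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
	if (IS_ERR_VALUE(offset))	/* replaces the old IS_MURAM_ERR() check */
		return -ENOMEM;

	pram = qe_muram_addr(offset);	/* translate the offset to a virtual address */
	/* ... program the parameter RAM through pram ... */

	qe_muram_free(offset);
	return 0;
}

IS_ERR_VALUE() treats values in the top errno range of unsigned long as errors, which is why a negative errno cast to unsigned long can share the return type with valid offsets.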
@@ -330,7 +330,7 @@ void m8xx_cpm_dpinit(void)
 	 * with the processor and the microcode patches applied / activated.
 	 * But the following should be at least safe.
 	 */
-	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
+	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
 }
 
 /*
@@ -338,9 +338,9 @@ void m8xx_cpm_dpinit(void)
  * This function returns an offset into the DPRAM area.
  * Use cpm_dpram_addr() to get the virtual address of the area.
  */
-uint cpm_dpalloc(uint size, uint align)
+unsigned long cpm_dpalloc(uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
@@ -352,30 +352,30 @@ uint cpm_dpalloc(uint size, uint align)
 }
 EXPORT_SYMBOL(cpm_dpalloc);
 
-int cpm_dpfree(uint offset)
+int cpm_dpfree(unsigned long offset)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
-	ret = rh_free(&cpm_dpmem_info, (void *)offset);
+	ret = rh_free(&cpm_dpmem_info, offset);
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
 	return ret;
 }
 EXPORT_SYMBOL(cpm_dpfree);
 
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
 	cpm_dpmem_info.alignment = align;
-	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
-	return (uint)start;
+	return start;
 }
 EXPORT_SYMBOL(cpm_dpalloc_fixed);
 
@@ -385,7 +385,7 @@ void cpm_dpdump(void)
 }
 EXPORT_SYMBOL(cpm_dpdump);
 
-void *cpm_dpram_addr(uint offset)
+void *cpm_dpram_addr(unsigned long offset)
 {
 	return (void *)(dpram_vbase + offset);
 }
@@ -248,15 +248,14 @@ static void cpm2_dpinit(void)
 	 * varies with the processor and the microcode patches activated.
 	 * But the following should be at least safe.
 	 */
-	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE,
-			CPM_DATAONLY_SIZE);
+	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
 }
 
 /* This function returns an index into the DPRAM area.
  */
-uint cpm_dpalloc(uint size, uint align)
+unsigned long cpm_dpalloc(uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
@@ -268,13 +267,13 @@ uint cpm_dpalloc(uint size, uint align)
 }
 EXPORT_SYMBOL(cpm_dpalloc);
 
-int cpm_dpfree(uint offset)
+int cpm_dpfree(unsigned long offset)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
-	ret = rh_free(&cpm_dpmem_info, (void *)offset);
+	ret = rh_free(&cpm_dpmem_info, offset);
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
 	return ret;
@@ -282,17 +281,17 @@ int cpm_dpfree(uint offset)
 EXPORT_SYMBOL(cpm_dpfree);
 
 /* not sure if this is ever needed */
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
 	cpm_dpmem_info.alignment = align;
-	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
-	return (uint)start;
+	return start;
 }
 EXPORT_SYMBOL(cpm_dpalloc_fixed);
 
@@ -302,7 +301,7 @@ void cpm_dpdump(void)
 }
 EXPORT_SYMBOL(cpm_dpdump);
 
-void *cpm_dpram_addr(uint offset)
+void *cpm_dpram_addr(unsigned long offset)
 {
 	return (void *)(im_dprambase + offset);
 }
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(qe_put_snum);
 static int qe_sdma_init(void)
 {
 	struct sdma *sdma = &qe_immr->sdma;
-	u32 sdma_buf_offset;
+	unsigned long sdma_buf_offset;
 
 	if (!sdma)
 		return -ENODEV;
@@ -252,10 +252,10 @@ static int qe_sdma_init(void)
 	/* allocate 2 internal temporary buffers (512 bytes size each) for
 	 * the SDMA */
 	sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
-	if (IS_MURAM_ERR(sdma_buf_offset))
+	if (IS_ERR_VALUE(sdma_buf_offset))
 		return -ENOMEM;
 
-	out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK);
+	out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
 	out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
 					(0x1 << QE_SDMR_CEN_SHIFT)));
 
@@ -291,33 +291,32 @@ static void qe_muram_init(void)
 	if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) {
 		address = *of_get_address(np, 0, &size, &flags);
 		of_node_put(np);
-		rh_attach_region(&qe_muram_info,
-			(void *)address, (int)size);
+		rh_attach_region(&qe_muram_info, address, (int) size);
 	}
 }
 
 /* This function returns an index into the MURAM area.
  */
-u32 qe_muram_alloc(u32 size, u32 align)
+unsigned long qe_muram_alloc(int size, int align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qe_muram_lock, flags);
 	start = rh_alloc_align(&qe_muram_info, size, align, "QE");
 	spin_unlock_irqrestore(&qe_muram_lock, flags);
 
-	return (u32) start;
+	return start;
 }
 EXPORT_SYMBOL(qe_muram_alloc);
 
-int qe_muram_free(u32 offset)
+int qe_muram_free(unsigned long offset)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qe_muram_lock, flags);
-	ret = rh_free(&qe_muram_info, (void *)offset);
+	ret = rh_free(&qe_muram_info, offset);
 	spin_unlock_irqrestore(&qe_muram_lock, flags);
 
 	return ret;
@@ -325,16 +324,16 @@ int qe_muram_free(u32 offset)
 EXPORT_SYMBOL(qe_muram_free);
 
 /* not sure if this is ever needed */
-u32 qe_muram_alloc_fixed(u32 offset, u32 size)
+unsigned long qe_muram_alloc_fixed(unsigned long offset, int size)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qe_muram_lock, flags);
-	start = rh_alloc_fixed(&qe_muram_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&qe_muram_info, offset, size, "commproc");
 	spin_unlock_irqrestore(&qe_muram_lock, flags);
 
-	return (u32) start;
+	return start;
 }
 EXPORT_SYMBOL(qe_muram_alloc_fixed);
 
@@ -344,7 +343,7 @@ void qe_muram_dump(void)
 }
 EXPORT_SYMBOL(qe_muram_dump);
 
-void *qe_muram_addr(u32 offset)
+void *qe_muram_addr(unsigned long offset)
 {
 	return (void *)&qe_immr->muram[offset];
 }
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/interrupt.h>
+#include <linux/err.h>
 
 #include <asm/io.h>
 #include <asm/immap_qe.h>
@@ -268,7 +269,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	/* Allocate memory for Tx Virtual Fifo */
 	uccf->ucc_fast_tx_virtual_fifo_base_offset =
 	    qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_MURAM_ERR(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+	if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO", __FUNCTION__);
 		uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
 		ucc_fast_free(uccf);
@@ -280,7 +281,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	    qe_muram_alloc(uf_info->urfs +
 			   UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
 			   UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_MURAM_ERR(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+	if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO", __FUNCTION__);
 		uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
 		ucc_fast_free(uccf);
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/interrupt.h>
+#include <linux/err.h>
 
 #include <asm/io.h>
 #include <asm/immap_qe.h>
@@ -175,7 +176,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	/* Get PRAM base */
 	uccs->us_pram_offset =
 	    qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
-	if (IS_MURAM_ERR(uccs->us_pram_offset)) {
+	if (IS_ERR_VALUE(uccs->us_pram_offset)) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __FUNCTION__);
 		ucc_slow_free(uccs);
 		return -ENOMEM;
@@ -210,7 +211,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->rx_base_offset =
 	    qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
 			   QE_ALIGNMENT_OF_BD);
-	if (IS_MURAM_ERR(uccs->rx_base_offset)) {
+	if (IS_ERR_VALUE(uccs->rx_base_offset)) {
 		printk(KERN_ERR "%s: cannot allocate RX BDs", __FUNCTION__);
 		uccs->rx_base_offset = 0;
 		ucc_slow_free(uccs);
@@ -220,7 +221,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->tx_base_offset =
 	    qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
 			   QE_ALIGNMENT_OF_BD);
-	if (IS_MURAM_ERR(uccs->tx_base_offset)) {
+	if (IS_ERR_VALUE(uccs->tx_base_offset)) {
 		printk(KERN_ERR "%s: cannot allocate TX BDs", __FUNCTION__);
 		uccs->tx_base_offset = 0;
 		ucc_slow_free(uccs);