* ========== Copyright Header Begin ==========================================
* OpenSPARC T2 Processor File: memsparse.c
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
#pragma ident "@(#)memsparse.c 1.2 07/03/26 SMI"
* Special purpose memory device to allow for holey memory based on
* address patterns. Only accesses to the memory device are checked,
* the device implements data for the entire range of the device and
* a backing file, if used, also contains data for the entire range.
* device "memsparse" 0x100000 +1M {
* This specifies that bits [7:6] of the address (the '0xc0') must
* be 01(binary) (the '0x40').
/*
 * Forward declarations for the memsparse device entry points:
 * config parse / init / dump, plus the cacheable-extent query and
 * the CPU access handler wired into dev_type_memsparse below.
 */
static void memsprse_parse(config_dev_t *);
static void memsprse_init(config_dev_t *);
static void memsprse_dump(config_dev_t *);
static tpaddr_t memsprse_cacheable(config_addr_t *, dev_access_t type,
	tpaddr_t off, uint8_t **cbp);
static bool_t memsprse_cpu_access(simcpu_t *, config_addr_t *, tpaddr_t off,
	maccess_t op, uint64_t *regp);
/*
 * NOTE(review): lossy extract -- the dev_type_t initializer below is
 * truncated (remaining members and closing brace are missing from this
 * view), and the uint64_t/bool_t fields that follow are the interior of
 * the memsprse_dev_t struct whose declaration is not visible here.
 * Code lines are preserved verbatim; only comments were touched.
 */
dev_type_t dev_type_memsparse
= {
memsprse_cpu_access
, /* shouldn't be called if we're a RAM */
uint64_t size
; /* size of memory segment */
uint64_t stride
; /* 2^mask_SLB */
uint64_t stridemask
; /* stride - 1 */
uint64_t fileextent
; /* how much of file to load in */
uint64_t fileextenta
; /* page aligned fileextent */
uint64_t fileoffset
; /* offset in file to start loading */
uint64_t memoffset
; /* offset in memory of where to load */
bool_t is_rom
; /* mmap file with MAP_PRIVATE flag */
bool_t is_shared
; /* mmap file with MAP_SHARED flag */
/* This should be in the processor structure - but is common to all now. */
/*
 * Parse the "memsparse" device clause from the simulator config file:
 * allocates the memsprse_dev_t, then consumes optional mask / rom /
 * shared / load directives and validates them (mask is mandatory; base
 * and size must be stride-aligned).
 *
 * NOTE(review): this body is a lossy extract -- braces, token-fetch
 * calls and several enclosing conditions are missing from this view.
 * Code lines are preserved verbatim; only comments were added.
 */
memsprse_parse(config_dev_t
*config_devp
)
* Allocate the memory device and all that stuff
mdp
= Xcalloc(1, memsprse_dev_t
);
/* Device state spans the full configured address range. */
mdp
->size
= config_devp
->addrp
->range
;
DBG( PRINTF(("memsprse_parse: parsing device %d\n", config_devp
->device_id
)); );
goto finished
; /* nothing more to parse */
lex_fatal("expecting either ; or { when parsing memory device");
/* Per-"load"-directive locals -- presumably reset for each load
 * directive; confirm against the complete source. */
uint64_t startoffset
= 0LL;
bool_t is_shared
, is_rom
;
if (tok
== T_R_Brace
) break;
lex_fatal("expected load, rom, shared or mask "
"directive parsing memory device");
/* "mask" directive: a mask and a match value; the match must be a
 * subset of the mask bits (checked below). */
if (streq(lex
.strp
, "mask")) {
if (mdp
->mask
) lex_fatal("mask already specified");
lex_fatal("mask expects a number");
lex_fatal("mask must be non-zero");
lex_fatal("mask expects a second number");
if ((lex
.val
& ~mdp
->mask
) != 0)
lex_fatal("(match & ~mask) must be zero");
if (streq(lex
.strp
, "rom")) {
if (mdp
->is_rom
) lex_fatal("rom already specified");
if (streq(lex
.strp
, "shared")) {
lex_fatal("shared already specified");
if (!streq(lex
.strp
, "load")) goto fail
;
lex_fatal("load already specified");
* Get the load offset, ie. the offset from the base
* address to load this file. For disk devices, we
* don't need this as we use the size of the previous
* disk to work out where to load the next disk.
if (lex
.val
< config_devp
->addrp
->baseaddr
||
lex
.val
>= config_devp
->addrp
->topaddr
)
lex_fatal("specified load address is outside "
"the range of the memory device");
startoffset
= lex
.val
- config_devp
->addrp
->baseaddr
;
lex_fatal("Expected either a start address / offset "
"or filename for memory device load directive");
fnamep
= Xstrdup(lex
.strp
);
lex_fatal("Expected ; or file offset for memory device "
lex_fatal("load file offset must be >0 for load directive");
lex_fatal("Expected ; or load length for memory device "
lex_fatal("load length must be >=0 for load directive");
/* Validate the named file exists and that the requested
 * offset/length fit both the file and the device range. */
if (stat(fnamep
, &sb
) < 0)
lex_fatal("error opening load file %s", fnamep
);
flen
= sb
.st_size
- foffset
;
if (sb
.st_size
< (foffset
+ flen
))
lex_fatal("load file %s is smaller than the specified "
if ((startoffset
+ flen
) > config_devp
->addrp
->range
)
lex_fatal("load file %s is larger than the memory device",
/* OK have parsed file info - add it to the load block */
mdp
->fileextent
= flen
; /* how much of file to load in */
mdp
->fileextenta
= sim_roundup(flen
, pgsize
);
mdp
->fileoffset
= foffset
; /* offset in file to start load */
mdp
->memoffset
= startoffset
; /* offset in mem to start load */
mdp
->is_shared
= is_shared
; /* MMAP with flag MAP_SHARED */
mdp
->is_rom
= is_rom
; /* MMAP with flag MAP_PRIVATE */
/* Post-parse validation: the mask directive is mandatory, and a
 * stride smaller than a cache line only warns rather than fails. */
lex_fatal("memsparse must have a mask directive");
if (mdp
->stride
< SS_CACHE_LINE
)
warning("memsparse mask stride 0x%llx < size of cache "
"line 0x%llx", mdp
->stride
, SS_CACHE_LINE
);
mdp
->stridemask
= mdp
->stride
- 1;
/* Base and size must be stride-aligned; memsprse_cacheable's extent
 * computation relies on this (see comment there). */
if ((config_devp
->addrp
->baseaddr
& mdp
->stridemask
) != 0)
lex_fatal("memsparse: base must be aligned to stride of "
if ((config_devp
->addrp
->range
& mdp
->stridemask
) != 0)
lex_fatal("memsparse: size must be a multiple of stride of "
* Initialise the mem after parsing is complete
* If no files have been requested, then we force a
/*
 * Initialise the memsparse device after parsing: anonymously mmap the
 * whole device range (read-only when is_rom), then, if a load file was
 * specified, validate its page alignment and mmap it over the anon
 * block at memoffset with MAP_FIXED.
 *
 * NOTE(review): this body is a lossy extract -- braces, the do{} of the
 * open-retry loop, PROT flags of the second mmap and other lines are
 * missing from this view.  Code lines are preserved verbatim; only
 * comments were touched.
 */
memsprse_init(config_dev_t
*config_devp
)
mdp
= (memsprse_dev_t
*)config_devp
->devp
;
* Perform a check to see that the requested loaded files are
* a) correctly aligned, and
* b) whether we need a /dev/zero mapping to back sections
* not covered by other mmapped files (beginning, middle,
* end), or because we need to "load" files in that are
* For the moment we create a MAP_ANON block of the entire
* ram/rom then map over that any additional data files we
* Force a segv if we ever try and write to a "ROM".
DBG( PRINTF(("memory mapping : 0x%llx\n", sim_roundup(mdp
->size
, pgsize
))); );
/* Backing store for the whole device: anonymous, no swap reserve,
 * write-protected when the device is a ROM. */
datap
= mdp
->datap
= (void*)mmap(NULL
, sim_roundup(mdp
->size
, pgsize
),
PROT_READ
| (mdp
->is_rom
? 0 : PROT_WRITE
),
MAP_PRIVATE
| MAP_ANON
| MAP_NORESERVE
, -1, 0);
if (MAP_FAILED
== datap
) fatal("Initial mmap of anon memory failed");
* Now we either map or load in all the remaining files that the
* config file may have specified.
if (mdp
->fnamep
!= NULL
) {
bool_t is_shared
= mdp
->is_shared
;
/* Open retried while EAGAIN (loop head missing from this view). */
fd
= open(mdp
->fnamep
, is_shared
? O_RDWR
: O_RDONLY
);
} while (fd
< 0 && EAGAIN
== errno
);
if (fd
< 0) fatal("Failed opening file %s", mdp
->fnamep
);
* Check that the fileoffset (offset in file to start
* loading) is page aligned
if ((mdp
->fileoffset
% pgsize
) != 0)
fatal("Offset 0x%llx in file %s is not aligned "
mdp
->fileoffset
, mdp
->fnamep
, pgsize
);
/* Allow non-rounded out contents */
if ((mdp
->fileextent
% pgsize
) != 0)
fatal("Loaded extent 0x%llx for file %s is not "
"aligned with 0x%llx pagesize",
mdp
->fileextent
, mdp
->fnamep
, pgsize
);
* Check that the memoffset (offset in memory to start
* loading) is page aligned
if ((mdp
->memoffset
% pgsize
) != 0)
fatal("Offset 0x%llx into RAM for file %s "
"is not aligned with 0x%llx pagesize",
mdp
->memoffset
, mdp
->fnamep
, pgsize
);
ASSERT(NULL
!= datap
|| (NULL
== datap
&&
DBG( PRINTF(("mapping %s (%s): 0x%llx -> 0x%llx (0x%llx)\n",
(is_shared
? "SHARED" : "PRIVATE"),
config_devp
->addrp
->baseaddr
+ mdp
->memoffset
,
config_devp
->addrp
->baseaddr
+ mdp
->memoffset
+
mdp
->fileextent
, mdp
->fileextent
)););
* mmap the file into memory with the appropriate flags.
/* MAP_FIXED overlays the anon backing at datap + memoffset; the
 * length/PROT arguments are missing from this extract. */
mapp
= (void*)mmap((void*)(datap
+ mdp
->memoffset
),
(is_shared
? MAP_SHARED
: MAP_PRIVATE
) | MAP_FIXED
|
MAP_NORESERVE
, fd
, mdp
->fileoffset
);
if (((uint8_t *)MAP_FAILED
) == mapp
)
fatal("Failed mapping file %s",
ASSERT((datap
+ mdp
->memoffset
) == mapp
);
* Memory configuration dump
/*
 * Dump this device's configuration (for debug output).
 * NOTE(review): only the signature survives in this extract -- the
 * entire function body is missing from this view.
 */
memsprse_dump(config_dev_t
*config_devp
)
* Returns the extent of the linear cacheable block
* starting at offset, and a pointer to the state block
* Note: if the device is a ROM, attempts to write
* i.e. DA_Store as type must be failed -
* the function is supposed to return 0.
/*
 * Return the extent (in bytes) of linearly cacheable data starting at
 * 'offset' within this device, and set *blockp to the backing data.
 * Returns 0 (store rejected) for stores to a ROM, and the extent is
 * bounded by the configured stride.
 *
 * NOTE(review): lossy extract -- the return type line, the declaration
 * of 'mdp', several 'return 0' / failure branches and closing braces
 * are missing from this view.  Code lines are preserved verbatim.
 */
memsprse_cacheable(config_addr_t
*config_addrp
, dev_access_t type
,
tpaddr_t offset
, uint8_t **blockp
)
mdp
= config_addrp
->config_devp
->devp
;
/* fail store attempts to a ROM */
if (mdp
->is_rom
&& (type
& DA_Store
))
/* Range check, then the sparse "holey memory" check: the masked
 * address bits must equal the configured match value. */
if ((offset
< 0) || (offset
>= config_addrp
->range
)) {
if (((config_addrp
->baseaddr
+ offset
) & mdp
->mask
) !=
*blockp
= mdp
->datap
+ offset
;
* This expression for the extent relies on parse enforcing
* alignment of base address and size to mdp->stride.
return (mdp
->stride
- (offset
& (mdp
->stride
- 1)));
* Should only get invoked if this is a ROM.
* Indicate the store failed - return false
/*
 * CPU access fallback handler.  Per the comment above, this should only
 * be invoked for stores to a ROM; it logs a warning and (per the
 * "return false" comment) indicates the store failed.
 *
 * NOTE(review): lossy extract -- the return type line, the 'mdp'
 * declaration, the return statement and closing brace are missing from
 * this view.  Code lines are preserved verbatim.
 */
memsprse_cpu_access(simcpu_t
*sp
, config_addr_t
*cap
, tpaddr_t offset
,
maccess_t op
, uint64_t *regp
)
mdp
= cap
->config_devp
->devp
;
/* 1 << (op & MA_Size_Mask) decodes the access size in bytes. */
EXEC_WARNING(("memsprse_cpu_access: attempted store to ROM "
"@ pc 0x%llx of %d bytes at offset 0x%llx",
sp
->pc
, 1<<(op
& MA_Size_Mask
), offset
));