2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
|
|
|
|
* Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* Architecture- / platform-specific boot-time initialization code for
|
|
|
|
* the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
|
|
|
|
* code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
|
|
|
|
* <dan@net4x.com>.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#undef DEBUG
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/threads.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/param.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/kdev_t.h>
|
|
|
|
#include <linux/major.h>
|
|
|
|
#include <linux/root_dev.h>
|
2005-11-01 03:45:19 +03:00
|
|
|
#include <linux/kernel.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/machdep.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
#include <asm/mmu.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <asm/mmu_context.h>
|
|
|
|
#include <asm/cputable.h>
|
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/iommu.h>
|
2005-08-03 08:43:21 +04:00
|
|
|
#include <asm/firmware.h>
|
2005-11-10 07:53:40 +03:00
|
|
|
#include <asm/system.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <asm/time.h>
|
|
|
|
#include <asm/paca.h>
|
|
|
|
#include <asm/cache.h>
|
|
|
|
#include <asm/sections.h>
|
2005-06-22 04:15:36 +04:00
|
|
|
#include <asm/abs_addr.h>
|
2005-11-02 03:55:28 +03:00
|
|
|
#include <asm/iseries/hv_lp_config.h>
|
2005-11-02 03:11:11 +03:00
|
|
|
#include <asm/iseries/hv_call_event.h>
|
2005-11-02 03:41:12 +03:00
|
|
|
#include <asm/iseries/hv_call_xm.h>
|
2005-11-02 06:13:34 +03:00
|
|
|
#include <asm/iseries/it_lp_queue.h>
|
2005-11-02 07:10:38 +03:00
|
|
|
#include <asm/iseries/mf.h>
|
2005-11-02 04:08:31 +03:00
|
|
|
#include <asm/iseries/hv_lp_event.h>
|
2005-11-02 07:02:47 +03:00
|
|
|
#include <asm/iseries/lpar_map.h>
|
2006-01-11 03:54:08 +03:00
|
|
|
#include <asm/udbg.h>
|
2006-04-04 08:49:48 +04:00
|
|
|
#include <asm/irq.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2005-11-01 07:30:26 +03:00
|
|
|
#include "naca.h"
|
2005-09-27 12:44:42 +04:00
|
|
|
#include "setup.h"
|
2005-09-28 17:37:01 +04:00
|
|
|
#include "irq.h"
|
|
|
|
#include "vpd_areas.h"
|
|
|
|
#include "processor_vpd.h"
|
2006-07-13 11:52:17 +04:00
|
|
|
#include "it_lp_naca.h"
|
2005-09-28 17:37:01 +04:00
|
|
|
#include "main_store.h"
|
|
|
|
#include "call_sm.h"
|
2005-10-14 11:09:16 +04:00
|
|
|
#include "call_hpt.h"
|
2005-09-27 12:44:42 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
#ifdef DEBUG
|
2006-01-11 03:54:08 +03:00
|
|
|
#define DBG(fmt...) udbg_printf(fmt)
|
2005-04-17 02:20:36 +04:00
|
|
|
#else
|
|
|
|
#define DBG(fmt...)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Function Prototypes */
|
2005-11-10 05:37:51 +03:00
|
|
|
static unsigned long build_iSeries_Memory_Map(void);
|
2005-10-19 17:11:21 +04:00
|
|
|
static void iseries_shared_idle(void);
|
|
|
|
static void iseries_dedicated_idle(void);
|
2005-06-22 04:15:52 +04:00
|
|
|
#ifdef CONFIG_PCI
|
2005-04-17 02:20:36 +04:00
|
|
|
extern void iSeries_pci_final_fixup(void);
|
2005-06-22 04:15:52 +04:00
|
|
|
#else
|
|
|
|
static void iSeries_pci_final_fixup(void) { }
|
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
extern unsigned long iSeries_recal_tb;
|
|
|
|
extern unsigned long iSeries_recal_titan;
|
|
|
|
|
|
|
|
/*
 * One contiguous range of partition memory, described both in the
 * hypervisor's "absolute" (real hardware) address space and in the
 * "logical" (bitmap) space used by the main store access map.
 * Units vary by caller: ranges start as byte addresses and are later
 * converted in place to 256K-chunk numbers (see the mainstore VPD
 * processing functions below).
 */
struct MemoryBlock {
	unsigned long absStart;		/* first absolute address/chunk (inclusive) */
	unsigned long absEnd;		/* one past the last absolute address/chunk */
	unsigned long logicalStart;	/* first logical (bitmap) address/chunk */
	unsigned long logicalEnd;	/* one past the last logical address/chunk */
};
|
|
|
|
|
|
|
|
/*
 * Process the main store vpd to determine where the holes in memory are
 * and return the number of physical blocks and fill in the array of
 * block data.
 *
 * Condor variant: memory is described by an IoHriMainStoreSegment4 at
 * xMsVpd, which records at most one non-interleaved "hole".  Writes one
 * or two entries into mb_array and returns the count (1 or 2).
 * NOTE(review): max_entries is unused here — at most 2 entries are ever
 * written; the caller passes an array of 32.
 */
static unsigned long iSeries_process_Condor_mainstore_vpd(
		struct MemoryBlock *mb_array, unsigned long max_entries)
{
	unsigned long holeFirstChunk, holeSizeChunks;
	unsigned long numMemoryBlocks = 1;
	struct IoHriMainStoreSegment4 *msVpd =
		(struct IoHriMainStoreSegment4 *)xMsVpd;
	unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
	unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
	unsigned long holeSize = holeEnd - holeStart;

	printk("Mainstore_VPD: Condor\n");
	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	/* Default: one block covering chunks [0, 4G) in both spaces. */
	mb_array[0].logicalStart = 0;
	mb_array[0].logicalEnd = 0x100000000;
	mb_array[0].absStart = 0;
	mb_array[0].absEnd = 0x100000000;

	if (holeSize) {
		numMemoryBlocks = 2;
		/* Mask off the high address bits, then convert to chunks. */
		holeStart = holeStart & 0x000fffffffffffff;
		holeStart = addr_to_chunk(holeStart);
		holeFirstChunk = holeStart;
		holeSize = addr_to_chunk(holeSize);
		holeSizeChunks = holeSize;
		printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
				holeFirstChunk, holeSizeChunks );
		/*
		 * Split at the hole: block 0 ends where the hole starts;
		 * block 1 resumes after it.  The logical space stays
		 * contiguous (the hole is simply removed from it).
		 */
		mb_array[0].logicalEnd = holeFirstChunk;
		mb_array[0].absEnd = holeFirstChunk;
		mb_array[1].logicalStart = holeFirstChunk;
		mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
		mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
		mb_array[1].absEnd = 0x100000000;
	}
	return numMemoryBlocks;
}
|
|
|
|
|
|
|
|
#define MaxSegmentAreas 32
|
|
|
|
#define MaxSegmentAdrRangeBlocks 128
|
|
|
|
#define MaxAreaRangeBlocks 4
|
|
|
|
|
|
|
|
/*
 * Regatta variant of the mainstore VPD scan (IoHriMainStoreSegment5).
 *
 * Walks up to MaxSegmentAreas areas (selected by the msAreaExists bit
 * mask, MSB first), collects each area's address-range blocks into
 * mb_array (skipping duplicates, panicking past max_entries), sorts the
 * blocks by absolute start address, then assigns contiguous "logical"
 * (bitmap-space) addresses and converts everything to 256K-chunk units.
 * Returns the number of blocks written.
 */
static unsigned long iSeries_process_Regatta_mainstore_vpd(
		struct MemoryBlock *mb_array, unsigned long max_entries)
{
	struct IoHriMainStoreSegment5 *msVpdP =
		(struct IoHriMainStoreSegment5 *)xMsVpd;
	unsigned long numSegmentBlocks = 0;
	u32 existsBits = msVpdP->msAreaExists;
	unsigned long area_num;

	printk("Mainstore_VPD: Regatta\n");

	for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
		unsigned long numAreaBlocks;
		struct IoHriMainStoreArea4 *currentArea;

		/* Top bit of existsBits selects the current area (shifted below). */
		if (existsBits & 0x80000000) {
			unsigned long block_num;

			currentArea = &msVpdP->msAreaArray[area_num];
			numAreaBlocks = currentArea->numAdrRangeBlocks;
			/* No trailing \n: the per-block printks continue this line. */
			printk("ms_vpd: processing area %2ld blocks=%ld",
					area_num, numAreaBlocks);
			for (block_num = 0; block_num < numAreaBlocks;
					++block_num ) {
				/* Process an address range block */
				struct MemoryBlock tempBlock;
				unsigned long i;

				tempBlock.absStart =
					(unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
				tempBlock.absEnd =
					(unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
				tempBlock.logicalStart = 0;
				tempBlock.logicalEnd = 0;
				printk("\n block %ld absStart=%016lx absEnd=%016lx",
						block_num, tempBlock.absStart,
						tempBlock.absEnd);

				/* Linear duplicate check by absolute start address. */
				for (i = 0; i < numSegmentBlocks; ++i) {
					if (mb_array[i].absStart ==
							tempBlock.absStart)
						break;
				}
				if (i == numSegmentBlocks) {
					if (numSegmentBlocks == max_entries)
						panic("iSeries_process_mainstore_vpd: too many memory blocks");
					mb_array[numSegmentBlocks] = tempBlock;
					++numSegmentBlocks;
				} else
					printk(" (duplicate)");
			}
			printk("\n");
		}
		existsBits <<= 1;
	}
	/* Now sort the blocks found into ascending sequence */
	/* (simple bubble sort — block count is small, bounded by max_entries) */
	if (numSegmentBlocks > 1) {
		unsigned long m, n;

		for (m = 0; m < numSegmentBlocks - 1; ++m) {
			for (n = numSegmentBlocks - 1; m < n; --n) {
				if (mb_array[n].absStart <
						mb_array[n-1].absStart) {
					struct MemoryBlock tempBlock;

					tempBlock = mb_array[n];
					mb_array[n] = mb_array[n-1];
					mb_array[n-1] = tempBlock;
				}
			}
		}
	}
	/*
	 * Assign "logical" addresses to each block. These
	 * addresses correspond to the hypervisor "bitmap" space.
	 * Convert all addresses into units of 256K chunks.
	 */
	{
		unsigned long i, nextBitmapAddress;

		printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
		nextBitmapAddress = 0;
		for (i = 0; i < numSegmentBlocks; ++i) {
			unsigned long length = mb_array[i].absEnd -
				mb_array[i].absStart;

			/* Logical space is packed: each block follows the last. */
			mb_array[i].logicalStart = nextBitmapAddress;
			mb_array[i].logicalEnd = nextBitmapAddress + length;
			nextBitmapAddress += length;
			printk(" Bitmap range: %016lx - %016lx\n"
					" Absolute range: %016lx - %016lx\n",
					mb_array[i].logicalStart,
					mb_array[i].logicalEnd,
					mb_array[i].absStart, mb_array[i].absEnd);
			/* Byte addresses -> chunk numbers, in place. */
			mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
					0x000fffffffffffff);
			mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
					0x000fffffffffffff);
			mb_array[i].logicalStart =
				addr_to_chunk(mb_array[i].logicalStart);
			mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
		}
	}

	return numSegmentBlocks;
}
|
|
|
|
|
|
|
|
static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
|
|
|
|
unsigned long max_entries)
|
|
|
|
{
|
|
|
|
unsigned long i;
|
|
|
|
unsigned long mem_blocks = 0;
|
|
|
|
|
|
|
|
if (cpu_has_feature(CPU_FTR_SLB))
|
|
|
|
mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
|
|
|
|
max_entries);
|
|
|
|
else
|
|
|
|
mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
|
|
|
|
max_entries);
|
|
|
|
|
|
|
|
printk("Mainstore_VPD: numMemoryBlocks = %ld \n", mem_blocks);
|
|
|
|
for (i = 0; i < mem_blocks; ++i) {
|
|
|
|
printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
|
|
|
|
" abs chunks %016lx - %016lx\n",
|
|
|
|
i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
|
|
|
|
mb_array[i].absStart, mb_array[i].absEnd);
|
|
|
|
}
|
|
|
|
return mem_blocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fetch the kernel command line from the primary virtual service
 * processor into cmd_line and NUL-terminate it at the first NUL or
 * newline (or at offset 255 at the latest).
 */
static void __init iSeries_get_cmdline(void)
{
	char *cur;
	char *limit = cmd_line + 255;

	/* copy the command line parameter from the primary VSP */
	HvCallEvent_dmaToSp(cmd_line, 2 * 64 * 1024, 256,
			HvLpDma_Direction_RemoteToLocal);

	/* Scan for the end of the usable command line. */
	for (cur = cmd_line; cur < limit; ++cur) {
		if (*cur == 0 || *cur == '\n')
			break;
	}
	*cur = 0;
}
|
|
|
|
|
|
|
|
/*
 * Early platform init for iSeries: snapshot timebase/TOD for decrementer
 * recalibration, set up IOMMU and SMP hooks, bind LP event queue 0 to
 * processor 0, and initialize the machine facilities (mf) layer.
 */
static void __init iSeries_init_early(void)
{
	DBG(" -> iSeries_init_early()\n");

	/* Baseline timebase and hypervisor TOD for later recalibration. */
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();

	/*
	 * Initialize the DMA/TCE management
	 */
	iommu_init_early_iSeries();

	/* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP
	smp_init_iSeries();
#endif

	/* Associate Lp Event Queue 0 with processor 0 */
	HvCallEvent_setLpEventQueueInterruptProc(0, 0);

	mf_init();

	DBG(" <- iSeries_init_early()\n");
}
|
|
|
|
|
2005-08-03 14:21:23 +04:00
|
|
|
/*
 * Global physical->absolute chunk translation map.  The mapping array
 * itself is allocated later by mschunks_alloc() and filled in by
 * build_iSeries_Memory_Map().
 */
struct mschunks_map mschunks_map = {
	/* XXX We don't use these, but Piranha might need them. */
	.chunk_size  = MSCHUNKS_CHUNK_SIZE,
	.chunk_shift = MSCHUNKS_CHUNK_SHIFT,
	.chunk_mask  = MSCHUNKS_OFFSET_MASK,
};
/* Exported: modules translating addresses need access to the map. */
EXPORT_SYMBOL(mschunks_map);
|
2005-08-03 14:21:23 +04:00
|
|
|
|
2005-08-03 14:21:23 +04:00
|
|
|
/*
 * Carve the chunk-mapping array (num_chunks u32 entries) out of the
 * memory immediately above the kernel image by bumping klimit.  Must be
 * called before the normal memory allocators exist; the entries are
 * filled in later by build_iSeries_Memory_Map().
 */
void mschunks_alloc(unsigned long num_chunks)
{
	/* Align klimit so the u32 array starts on a natural boundary. */
	klimit = _ALIGN(klimit, sizeof(u32));
	mschunks_map.mapping = (u32 *)klimit;
	klimit += num_chunks * sizeof(u32);
	mschunks_map.num_chunks = num_chunks;
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* The iSeries may have very large memories ( > 128 GB ) and a partition
|
|
|
|
* may get memory in "chunks" that may be anywhere in the 2**52 real
|
|
|
|
* address space. The chunks are 256K in size. To map this to the
|
|
|
|
* memory model Linux expects, the AS/400 specific code builds a
|
|
|
|
* translation table to translate what Linux thinks are "physical"
|
|
|
|
* addresses to the actual real addresses. This allows us to make
|
|
|
|
* it appear to Linux that we have contiguous memory starting at
|
|
|
|
* physical address zero while in fact this could be far from the truth.
|
|
|
|
* To avoid confusion, I'll let the words physical and/or real address
|
|
|
|
* apply to the Linux addresses while I'll use "absolute address" to
|
|
|
|
* refer to the actual hardware real address.
|
|
|
|
*
|
|
|
|
* build_iSeries_Memory_Map gets information from the Hypervisor and
|
|
|
|
* looks at the Main Store VPD to determine the absolute addresses
|
|
|
|
* of the memory that has been assigned to our partition and builds
|
|
|
|
* a table used to translate Linux's physical addresses to these
|
|
|
|
* absolute addresses. Absolute addresses are needed when
|
|
|
|
* communicating with the hypervisor (e.g. to build HPT entries)
|
2005-11-10 05:37:51 +03:00
|
|
|
*
|
|
|
|
* Returns the physical memory size
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
|
|
|
|
2005-11-10 05:37:51 +03:00
|
|
|
/*
 * Build mschunks_map.mapping[]: for each Linux "physical" 256K chunk,
 * the absolute (hardware real) chunk it lives in.  The load area is
 * pinned at physical 0; HPT and load-area chunks are excluded from the
 * rest of the map.  Returns the resulting physical memory size in bytes
 * (see the block comment above this function).
 */
static unsigned long __init build_iSeries_Memory_Map(void)
{
	u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
	u32 nextPhysChunk;
	u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
	u32 totalChunks,moreChunks;
	u32 currChunk, thisChunk, absChunk;
	u32 currDword;
	u32 chunkBit;
	u64 map;
	struct MemoryBlock mb[32];
	unsigned long numMemoryBlocks, curBlock;

	/* Chunk size on iSeries is 256K bytes */
	totalChunks = (u32)HvLpConfig_getMsChunks();
	mschunks_alloc(totalChunks);

	/*
	 * Get absolute address of our load area
	 * and map it to physical address 0
	 * This guarantees that the loadarea ends up at physical 0
	 * otherwise, it might not be returned by PLIC as the first
	 * chunks
	 */

	loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
	loadAreaSize = itLpNaca.xLoadAreaChunks;

	/*
	 * Only add the pages already mapped here.
	 * Otherwise we might add the hpt pages
	 * The rest of the pages of the load area
	 * aren't in the HPT yet and can still
	 * be assigned an arbitrary physical address
	 */
	if ((loadAreaSize * 64) > HvPagesToMap)
		loadAreaSize = HvPagesToMap / 64;

	loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;

	/*
	 * TODO Do we need to do something if the HPT is in the 64MB load area?
	 * This would be required if the itLpNaca.xLoadAreaChunks includes
	 * the HPT size
	 */

	printk("Mapping load area - physical addr = 0000000000000000\n"
			" absolute addr = %016lx\n",
			chunk_to_addr(loadAreaFirstChunk));
	printk("Load area size %dK\n", loadAreaSize * 256);

	/* Identity-offset map the load area to physical chunks [0, size). */
	for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
		mschunks_map.mapping[nextPhysChunk] =
			loadAreaFirstChunk + nextPhysChunk;

	/*
	 * Get absolute address of our HPT and remember it so
	 * we won't map it to any physical address
	 */
	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
	hptSizePages = (u32)HvCallHpt_getHptPages();
	/* Pages -> 256K chunks. */
	hptSizeChunks = hptSizePages >>
		(MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;

	printk("HPT absolute addr = %016lx, size = %dK\n",
			chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);

	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);

	/*
	 * Process the main store access map from the hypervisor
	 * to build up our physical -> absolute translation table
	 */
	curBlock = 0;
	currChunk = 0;
	currDword = 0;
	moreChunks = totalChunks;

	/* One access-map dword covers 64 chunks; bits are consumed MSB first. */
	while (moreChunks) {
		map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
				currDword);
		thisChunk = currChunk;
		while (map) {
			chunkBit = map >> 63;
			map <<= 1;
			if (chunkBit) {
				--moreChunks;
				/* Advance to the memory block containing thisChunk. */
				while (thisChunk >= mb[curBlock].logicalEnd) {
					++curBlock;
					if (curBlock >= numMemoryBlocks)
						panic("out of memory blocks");
				}
				if (thisChunk < mb[curBlock].logicalStart)
					panic("memory block error");

				absChunk = mb[curBlock].absStart +
					(thisChunk - mb[curBlock].logicalStart);
				/* Skip chunks belonging to the HPT or the load area. */
				if (((absChunk < hptFirstChunk) ||
						(absChunk > hptLastChunk)) &&
						((absChunk < loadAreaFirstChunk) ||
						(absChunk > loadAreaLastChunk))) {
					mschunks_map.mapping[nextPhysChunk] =
						absChunk;
					++nextPhysChunk;
				}
			}
			++thisChunk;
		}
		++currDword;
		currChunk += 64;
	}

	/*
	 * main store size (in chunks) is
	 * totalChunks - hptSizeChunks
	 * which should be equal to
	 * nextPhysChunk
	 */
	return chunk_to_addr(nextPhysChunk);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Document me.
|
|
|
|
*/
|
|
|
|
static void __init iSeries_setup_arch(void)
|
|
|
|
{
|
2006-01-13 02:26:42 +03:00
|
|
|
if (get_lppaca()->shared_proc) {
|
2005-09-23 08:10:59 +04:00
|
|
|
ppc_md.idle_loop = iseries_shared_idle;
|
2006-04-13 00:23:22 +04:00
|
|
|
printk(KERN_DEBUG "Using shared processor idle loop\n");
|
2005-09-23 08:10:59 +04:00
|
|
|
} else {
|
|
|
|
ppc_md.idle_loop = iseries_dedicated_idle;
|
2006-04-13 00:23:22 +04:00
|
|
|
printk(KERN_DEBUG "Using dedicated idle loop\n");
|
2005-09-23 08:10:59 +04:00
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/* Setup the Lp Event Queue */
|
2005-06-30 09:08:27 +04:00
|
|
|
setup_hvlpevent_queue();
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
printk("Max logical processors = %d\n",
|
|
|
|
itVpdAreas.xSlicMaxLogicalProcs);
|
|
|
|
printk("Max physical processors = %d\n",
|
|
|
|
itVpdAreas.xSlicMaxPhysicalProcs);
|
|
|
|
}
|
|
|
|
|
2005-10-20 11:02:01 +04:00
|
|
|
/* Emit the machine line for /proc/cpuinfo. */
static void iSeries_show_cpuinfo(struct seq_file *m)
{
	seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
}
|
|
|
|
|
|
|
|
/*
 * ppc_md.progress hook: log a boot progress message and forward the
 * code to the machine facilities progress display.
 */
static void __init iSeries_progress(char * st, unsigned short code)
{
	printk("Progress: [%04x] - %s\n", (unsigned)code, st);
	mf_display_progress(code);
}
|
|
|
|
|
|
|
|
/*
 * Raise klimit past an embedded ram disk, if the naca says one is
 * appended to the kernel image, so early allocations don't clobber it.
 */
static void __init iSeries_fixup_klimit(void)
{
	/*
	 * Change klimit to take into account any ram disk
	 * that may be included
	 */
	if (naca.xRamDisk)
		klimit = KERNELBASE + (u64)naca.xRamDisk +
			(naca.xRamDiskSize * HW_PAGE_SIZE);
}
|
|
|
|
|
|
|
|
/*
 * Late initcall: blank the service-processor progress line once boot
 * is essentially complete.  Always succeeds.
 */
static int __init iSeries_src_init(void)
{
	/* clear the progress line */
	ppc_md.progress(" ", 0xffff);
	return 0;
}

late_initcall(iSeries_src_init);
|
|
|
|
|
2005-07-08 04:56:29 +04:00
|
|
|
/*
 * Loads 0x5555 into r0 and issues a system call; presumably a magic
 * trap recognized by the low-level exception code to process pending
 * LP events — TODO confirm against the syscall entry path.
 */
static inline void process_iSeries_events(void)
{
	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
}
|
|
|
|
|
|
|
|
/*
 * Give this shared processor back to the hypervisor for up to one jiffy.
 * Called from the shared idle loop with nothing to do; masks interrupt
 * classes around the yield, fakes a decrementer interrupt on return
 * (the decrementer stops while yielded), then processes LP events.
 */
static void yield_shared_processor(void)
{
	unsigned long tb;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
		HvCall_MaskLpEvent |
		HvCall_MaskLpProd |
		HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);

	/*
	 * The decrementer stops during the yield. Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_lppaca()->int_dword.fields.decr_int = 1;
	ppc64_runlatch_on();
	process_iSeries_events();
}
|
|
|
|
|
2005-10-19 17:11:21 +04:00
|
|
|
/*
 * Idle loop for shared-processor partitions: yield the CPU to the
 * hypervisor whenever there is neither work to schedule nor a pending
 * LP event.  The idle condition is rechecked with interrupts disabled
 * before yielding to close the race against a wakeup.
 */
static void iseries_shared_idle(void)
{
	while (1) {
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();

		if (hvlpevent_is_pending())
			process_iSeries_events();

		/* Leave idle: let the scheduler pick a runnable task. */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
|
|
|
|
|
2005-10-19 17:11:21 +04:00
|
|
|
/*
 * Idle loop for dedicated-processor partitions: poll at low SMT
 * priority until work arrives, servicing LP events in-loop.
 * TIF_POLLING_NRFLAG stays set for the thread's lifetime so remote
 * CPUs can wake us by setting need_resched without sending an IPI.
 */
static void iseries_dedicated_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		if (!need_resched()) {
			while (!need_resched()) {
				/* Drop thread priority while spinning idle. */
				ppc64_runlatch_off();
				HMT_low();

				if (hvlpevent_is_pending()) {
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}

			HMT_medium();
		}

		ppc64_runlatch_on();
		/* Work arrived: hand off to the scheduler. */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
|
|
|
|
|
2005-06-22 04:15:52 +04:00
|
|
|
#ifndef CONFIG_PCI
|
|
|
|
void __init iSeries_init_IRQ(void) { }
|
|
|
|
#endif
|
|
|
|
|
2006-11-13 01:27:39 +03:00
|
|
|
/*
 * ioremap hook: iSeries I/O "addresses" are hypervisor tokens, not real
 * physical addresses, so no mapping is needed — return the value as-is.
 */
static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
		unsigned long flags)
{
	return (void __iomem *)address;
}
|
|
|
|
|
2006-11-13 01:27:39 +03:00
|
|
|
/* iounmap hook: nothing to undo, since iseries_ioremap maps nothing. */
static void iseries_iounmap(volatile void __iomem *token)
{
}
|
|
|
|
|
2006-09-26 07:55:58 +04:00
|
|
|
/*
 * iSeries has no legacy IO, anything calling this function has to
 * fail or bad things will happen
 */
static int iseries_check_legacy_ioport(unsigned int baseport)
{
	/* Unconditionally reject: no ISA/legacy port space exists. */
	return -ENODEV;
}
|
|
|
|
|
2006-03-28 16:15:54 +04:00
|
|
|
/*
 * Platform probe hook: decide from the flattened device tree whether
 * we are running on an IBM iSeries LPAR.  When we are, select the
 * iSeries hash-PTE backend before returning success.
 */
static int __init iseries_probe(void)
{
	unsigned long dt_root = of_get_flat_dt_root();

	if (of_flat_dt_is_compatible(dt_root, "IBM,iSeries")) {
		/* Hook up the iSeries-specific hashed page table ops. */
		hpte_init_iSeries();
		return 1;
	}

	return 0;
}
|
|
|
|
|
2006-03-28 16:15:54 +04:00
|
|
|
/*
 * Machine description for the iSeries platform.  The define_machine()
 * macro registers this ppc_md table; iseries_probe() below selects it
 * at boot when the device tree is compatible with "IBM,iSeries".
 */
define_machine(iseries) {
	.name = "iSeries",
	.setup_arch = iSeries_setup_arch,
	.show_cpuinfo = iSeries_show_cpuinfo,
	.init_IRQ = iSeries_init_IRQ,
	.get_irq = iSeries_get_irq,
	.init_early = iSeries_init_early,
	.pcibios_fixup = iSeries_pci_final_fixup,
	/* Reboot/power-off go through the mf_* (management facility) calls;
	 * halt deliberately maps to power_off as well. */
	.restart = mf_reboot,
	.power_off = mf_power_off,
	.halt = mf_power_off,
	.get_boot_time = iSeries_get_boot_time,
	.set_rtc_time = iSeries_set_rtc_time,
	.get_rtc_time = iSeries_get_rtc_time,
	.calibrate_decr = generic_calibrate_decr,
	.progress = iSeries_progress,
	.probe = iseries_probe,
	/* Legacy IO always fails on iSeries — see iseries_check_legacy_ioport(). */
	.check_legacy_ioport = iseries_check_legacy_ioport,
	/* MMIO mapping is hooked so iSeries can hand out its own tokens. */
	.ioremap = iseries_ioremap,
	.iounmap = iseries_iounmap,
	/* XXX Implement enable_pmcs for iSeries */
};
|
|
|
|
|
2005-09-23 08:59:04 +04:00
|
|
|
/*
 * Very early boot entry for iSeries.  Identifies the CPU, flags the
 * firmware features, builds the physical memory map and a flattened
 * device tree, and returns the *physical* address of that device tree
 * blob for the generic early-boot code to consume.
 */
void * __init iSeries_early_setup(void)
{
	unsigned long phys_mem_size;

	/* Identify CPU type. This is done again by the common code later
	 * on but calling this function multiple times is fine.
	 */
	identify_cpu(0);

	/* Mark both ISERIES and LPAR: an iSeries partition is always an LPAR. */
	powerpc_firmware_features |= FW_FEATURE_ISERIES;
	powerpc_firmware_features |= FW_FEATURE_LPAR;

	iSeries_fixup_klimit();

	/*
	 * Initialize the table which translate Linux physical addresses to
	 * AS/400 absolute addresses
	 */
	phys_mem_size = build_iSeries_Memory_Map();

	/* Fetch the kernel command line from the hypervisor. */
	iSeries_get_cmdline();

	/* Build the flat device tree and hand back its physical address. */
	return (void *) __pa(build_flat_dt(phys_mem_size));
}
|
2005-11-01 03:45:19 +03:00
|
|
|
|
2006-01-11 03:54:08 +03:00
|
|
|
/*
 * Emit one character to the hypervisor log buffer, expanding a bare
 * LF into the CRLF sequence the console expects.
 */
static void hvputc(char c)
{
	char cr = '\r';

	/* Write the carriage return first so '\n' becomes "\r\n". */
	if (c == '\n')
		HvCall_writeLogBuffer(&cr, 1);

	HvCall_writeLogBuffer(&c, 1);
}
|
|
|
|
|
|
|
|
/*
 * Route early udbg output through the hypervisor log buffer by
 * installing hvputc() as the low-level character output hook.
 */
void __init udbg_init_iseries(void)
{
	udbg_putc = hvputc;
}
|