Mirror of https://github.com/lkl/linux.git
Merge tag 'mips_6.5_1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS fixes from Thomas Bogendoerfer:
- fixes for KVM
- fix for loongson build and cpu probing
- DT fixes
* tag 'mips_6.5_1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
MIPS: kvm: Fix build error with KVM_MIPS_DEBUG_COP0_COUNTERS enabled
MIPS: dts: add missing space before {
MIPS: Loongson: Fix build error when make modules_install
MIPS: KVM: Fix NULL pointer dereference
MIPS: Loongson: Fix cpu_probe_loongson() again
@@ -181,16 +181,12 @@ endif
 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
 cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap
 
-cflags-$(CONFIG_CPU_LOONGSON2E) += -march=loongson2e -Wa,--trap
-cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON2E) += $(call cc-option,-march=loongson2e) -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON2F) += $(call cc-option,-march=loongson2f) -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-march=loongson3a,-march=mips64r2) -Wa,--trap
 # Some -march= flags enable MMI instructions, and GCC complains about that
 # support being enabled alongside -msoft-float. Thus explicitly disable MMI.
 cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-mno-loongson-mmi)
-ifdef CONFIG_CPU_LOONGSON64
-cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
-cflags-$(CONFIG_CC_IS_GCC) += -march=loongson3a
-cflags-$(CONFIG_CC_IS_CLANG) += -march=mips64r2
-endif
 cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-mno-loongson-mmi)
 
 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
@@ -20,7 +20,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	i2c0_imux: i2c0-imux{
+	i2c0_imux: i2c0-imux {
 		compatible = "i2c-mux-pinctrl";
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -75,7 +75,7 @@
 		microchip,external-irqs = <3 8 13 18 23>;
 	};
 
-	pic32_pinctrl: pinctrl@1f801400{
+	pic32_pinctrl: pinctrl@1f801400 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		compatible = "microchip,pic32mzda-pinctrl";
@@ -317,7 +317,7 @@ struct kvm_vcpu_arch {
 	unsigned int aux_inuse;
 
 	/* COP0 State */
-	struct mips_coproc *cop0;
+	struct mips_coproc cop0;
 
 	/* Resume PC after MMIO completion */
 	unsigned long io_pc;
@@ -698,7 +698,7 @@ static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
 static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
 {
 	return kvm_mips_guest_can_have_fpu(vcpu) &&
-		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
+		kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP;
 }
 
 static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
@@ -710,7 +710,7 @@ static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
 static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
 {
 	return kvm_mips_guest_can_have_msa(vcpu) &&
-		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
+		kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA;
 }
 
 struct kvm_mips_callbacks {
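The hunks above carry the core of the "MIPS: KVM: Fix NULL pointer dereference" change: the guest COP0 register file becomes a member embedded in the vcpu architecture state rather than an object reached through a pointer, and the inline helpers now pass &vcpu->cop0 to the accessors. Every later KVM hunk in this merge follows mechanically from that. A minimal sketch of the idea, using simplified stand-in types (N_REGS, vcpu_arch_old/new, read_cause and example are illustrative, not the kernel's definitions):

/* Illustrative stand-ins only -- simplified from the real kernel types. */
#define N_REGS 32
#define N_SELS 8

struct mips_coproc {
	unsigned long reg[N_REGS][N_SELS];	/* guest CP0 register file */
};

/* Before: the register file hung off a pointer that had to be allocated
 * separately and could still be NULL when first dereferenced. */
struct vcpu_arch_old {
	struct mips_coproc *cop0;
};

/* After: the register file is part of the vcpu state itself. */
struct vcpu_arch_new {
	struct mips_coproc cop0;		/* storage always exists */
};

/* Accessors keep taking a struct mips_coproc *, so call sites only change
 * from passing arch->cop0 to passing &arch->cop0. */
static unsigned long read_cause(const struct mips_coproc *cop0)
{
	return cop0->reg[13][0];		/* CP0 Cause: register 13, select 0 */
}

static unsigned long example(struct vcpu_arch_new *arch)
{
	return read_cause(&arch->cop0);		/* no allocation, no NULL check */
}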
@@ -1677,7 +1677,10 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
 
 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 {
+	c->cputype = CPU_LOONGSON64;
+
 	/* All Loongson processors covered here define ExcCode 16 as GSExc. */
+	decode_configs(c);
 	c->options |= MIPS_CPU_GSEXCEX;
 
 	switch (c->processor_id & PRID_IMP_MASK) {
@@ -1687,7 +1690,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		case PRID_REV_LOONGSON2K_R1_1:
 		case PRID_REV_LOONGSON2K_R1_2:
 		case PRID_REV_LOONGSON2K_R1_3:
-			c->cputype = CPU_LOONGSON64;
 			__cpu_name[cpu] = "Loongson-2K";
 			set_elf_platform(cpu, "gs264e");
 			set_isa(c, MIPS_CPU_ISA_M64R2);
@@ -1700,14 +1702,12 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		switch (c->processor_id & PRID_REV_MASK) {
 		case PRID_REV_LOONGSON3A_R2_0:
 		case PRID_REV_LOONGSON3A_R2_1:
-			c->cputype = CPU_LOONGSON64;
 			__cpu_name[cpu] = "ICT Loongson-3";
 			set_elf_platform(cpu, "loongson3a");
 			set_isa(c, MIPS_CPU_ISA_M64R2);
 			break;
 		case PRID_REV_LOONGSON3A_R3_0:
 		case PRID_REV_LOONGSON3A_R3_1:
-			c->cputype = CPU_LOONGSON64;
 			__cpu_name[cpu] = "ICT Loongson-3";
 			set_elf_platform(cpu, "loongson3a");
 			set_isa(c, MIPS_CPU_ISA_M64R2);
@@ -1727,7 +1727,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
 		break;
 	case PRID_IMP_LOONGSON_64G:
-		c->cputype = CPU_LOONGSON64;
 		__cpu_name[cpu] = "ICT Loongson-3";
 		set_elf_platform(cpu, "loongson3a");
 		set_isa(c, MIPS_CPU_ISA_M64R2);
@@ -1737,8 +1736,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		panic("Unknown Loongson Processor ID!");
 		break;
 	}
-
-	decode_configs(c);
 }
 #else
 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
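Read together, the cpu-probe hunks reorder when the common setup in cpu_probe_loongson() runs: c->cputype is set once at the top and decode_configs() is called before the PRID switch, while the duplicated c->cputype assignments in the individual cases and the trailing decode_configs() call are dropped, presumably so the per-model cases only refine an already-decoded configuration. A condensed sketch of the resulting flow; the types, constants and helpers below are simplified stand-ins for the kernel's own (struct cpuinfo_mips, decode_configs(), set_elf_platform(), PRID values), and only one PRID case is shown:

#include <stdio.h>

#define PRID_IMP_MASK		0xff00
#define PRID_IMP_LOONGSON_64G	0xc000		/* placeholder value for this sketch */

enum { CPU_LOONGSON64 = 1 };
enum { MIPS_CPU_GSEXCEX = 0x1, MIPS_CPU_ISA_M64R2 = 0x2 };

struct cpuinfo_mips {
	unsigned int processor_id;
	int cputype;
	unsigned int options;
	unsigned int isa_level;
};

static const char *__cpu_name[1];

static void decode_configs(struct cpuinfo_mips *c) { (void)c; /* read CP0 Config registers */ }
static void set_elf_platform(unsigned int cpu, const char *plat) { (void)cpu; (void)plat; }
static void set_isa(struct cpuinfo_mips *c, unsigned int isa) { c->isa_level = isa; }

static void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
{
	c->cputype = CPU_LOONGSON64;	/* common to every model, set up front */

	/* All Loongson processors covered here define ExcCode 16 as GSExc. */
	decode_configs(c);		/* now runs before the PRID switch... */
	c->options |= MIPS_CPU_GSEXCEX;

	switch (c->processor_id & PRID_IMP_MASK) {
	case PRID_IMP_LOONGSON_64G:
		__cpu_name[cpu] = "ICT Loongson-3";
		set_elf_platform(cpu, "loongson3a");
		set_isa(c, MIPS_CPU_ISA_M64R2);
		break;
	default:
		puts("Unknown Loongson Processor ID!");
		break;
	}
	/* ...and the trailing decode_configs() call that used to sit here is gone. */
}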
@@ -312,7 +312,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
  */
 int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
 		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
@@ -384,7 +384,7 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
  */
 static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	ktime_t expires, threshold;
 	u32 count, compare;
 	int running;
@@ -444,7 +444,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
  */
 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	/* If count disabled just read static copy of count */
 	if (kvm_mips_count_disabled(vcpu))
@@ -502,7 +502,7 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
 				    ktime_t now, u32 count)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 compare;
 	u64 delta;
 	ktime_t expire;
@@ -603,7 +603,7 @@ resume:
  */
 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	ktime_t now;
 
 	/* Calculate bias */
@@ -649,7 +649,7 @@ void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
  */
 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	int dc;
 	ktime_t now;
 	u32 count;
@@ -696,7 +696,7 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
  */
 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	int dc;
 	u32 old_compare = kvm_read_c0_guest_compare(cop0);
 	s32 delta = compare - old_compare;
@@ -779,7 +779,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
  */
 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 count;
 	ktime_t now;
 
@@ -806,7 +806,7 @@ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
  */
 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
 	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
@@ -826,7 +826,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
  */
 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 count;
 
 	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
@@ -852,7 +852,7 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
  */
 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
 	s64 delta;
 	ktime_t expire, now;
@@ -649,7 +649,7 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 	int ret;
 	s64 v;
@@ -761,7 +761,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 	s64 v;
 	s64 vs[2];
@@ -1086,7 +1086,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	return kvm_mips_pending_timer(vcpu) ||
-		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
+		kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
 }
 
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@@ -1110,7 +1110,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
 	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
 	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
 
-	cop0 = vcpu->arch.cop0;
+	cop0 = &vcpu->arch.cop0;
 	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
 		  kvm_read_c0_guest_status(cop0),
 		  kvm_read_c0_guest_cause(cop0));
@@ -1232,7 +1232,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 
 	case EXCCODE_TLBS:
 		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
-			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+			  cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
 			  badvaddr);
 
 		++vcpu->stat.tlbmiss_st_exits;
@@ -1304,7 +1304,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 		kvm_get_badinstr(opc, vcpu, &inst);
 		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
 			exccode, opc, inst, badvaddr,
-			kvm_read_c0_guest_status(vcpu->arch.cop0));
+			kvm_read_c0_guest_status(&vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
@@ -1377,7 +1377,7 @@ int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 /* Enable FPU for guest and restore context */
 void kvm_own_fpu(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned int sr, cfg5;
 
 	preempt_disable();
@@ -1421,7 +1421,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
 /* Enable MSA for guest and restore context */
 void kvm_own_msa(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned int sr, cfg5;
 
 	preempt_disable();
@@ -54,9 +54,9 @@ void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
 	kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
 	for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
 		for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
-			if (vcpu->arch.cop0->stat[i][j])
+			if (vcpu->arch.cop0.stat[i][j])
 				kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
-					 vcpu->arch.cop0->stat[i][j]);
+					 vcpu->arch.cop0.stat[i][j]);
 		}
 	}
 #endif
@@ -322,11 +322,11 @@ TRACE_EVENT_FN(kvm_guest_mode_change,
 	    ),
 
 	    TP_fast_assign(
-			__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
+			__entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0);
 			__entry->pc = vcpu->arch.pc;
-			__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
-			__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
-			__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
+			__entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0);
+			__entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0);
+			__entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0);
 	    ),
 
 	    TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
@@ -422,7 +422,7 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
  */
 static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 cause, compare;
 
 	compare = kvm_read_sw_gc0_compare(cop0);
@@ -517,7 +517,7 @@ static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
  */
 static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 gctl0, compare, cause;
 
 	gctl0 = read_c0_guestctl0();
@@ -863,7 +863,7 @@ static unsigned long mips_process_maar(unsigned int op, unsigned long val)
 
 static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	val &= MIPS_MAARI_INDEX;
 	if (val == MIPS_MAARI_INDEX)
@@ -876,7 +876,7 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
 					      u32 *opc, u32 cause,
 					      struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
 	u32 rt, rd, sel;
 	unsigned long curr_pc;
@@ -1911,7 +1911,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
 			      const struct kvm_one_reg *reg,
 			      s64 *v)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned int idx;
 
 	switch (reg->id) {
@@ -2081,7 +2081,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_MAARI:
 		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
 			return -EINVAL;
-		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
+		*v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
 		break;
 #ifdef CONFIG_64BIT
 	case KVM_REG_MIPS_CP0_XCONTEXT:
@@ -2135,7 +2135,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
 			      const struct kvm_one_reg *reg,
 			      s64 v)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned int idx;
 	int ret = 0;
 	unsigned int cur, change;
@@ -2562,7 +2562,7 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
 
 static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	bool migrated, all;
 
 	/*
@@ -2704,7 +2704,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	if (current->flags & PF_VCPU)
 		kvm_vz_vcpu_save_wired(vcpu);
@@ -3076,7 +3076,7 @@ static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
 
 	/*