From 3494df00039215f6d80093bce5d92ad5015baa5f Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Thu, 29 Jul 2010 17:19:53 -0700 Subject: [PATCH] initial srpm import --- .gitignore | 2 + Makefile.config | 105 + acpi-ec-add-delay-before-write.patch | 52 + ...le-gpes-for-system-wakeup-in-advance.patch | 106 + ...9d2cc6863c7874889ea60a871fb71399dd3f.patch | 1285 + ...ab332832519c2e292db13f509e4360495a5a.patch | 44 + ...a735991c80fb0fc1bd4a13a93681c3c17ce0.patch | 165 + ...7447c1d94a79d5cfc647430784306b3a8323.patch | 381 + ...bbb7704cbf515c0fc68970abbe4e91d68521.patch | 77 + ...ace02320a3fb9614ddb27edc3a8700d68b26.patch | 53 + ...752de65dbfa6e522f1d666deb0ac152ef367.patch | 43 + add-appleir-usb-driver.patch | 666 + ata-generic-handle-new-mbp-with-mcp89.patch | 79 + ata-generic-implement-ata-gen-flags.patch | 114 + ...mask-when-noacl-mount-option-is-used.patch | 42 + ...irect-problem-in-the-dns-lookup-code.patch | 200 + config-arm | 110 + config-debug | 83 + config-generic | 4176 ++ config-i686-PAE | 5 + config-ia64-generic | 205 + config-nodebug | 83 + config-powerpc-generic | 331 + config-powerpc32-generic | 182 + config-powerpc32-smp | 4 + config-powerpc64 | 184 + config-rhel-generic | 205 + config-s390x | 227 + config-sparc64-generic | 201 + config-x86-generic | 473 + config-x86_64-generic | 397 + coredump-uid-pipe-check.patch | 14 + crypto-add-async-hash-testing.patch | 111 + die-floppy-die.patch | 30 + disable-i8042-check-on-apple-mac.patch | 59 + drm-encoder-disable.patch | 74 + ...-add-reclaimable-to-page-allocations.patch | 48 + drm-i915-fix-edp-panels.patch | 12 + ...i915-fix-hibernate-memory-corruption.patch | 41 + ...G4X-style-PLL-search-more-permissive.patch | 51 + drm-intel-945gm-stability-fixes.patch | 102 + drm-intel-big-hammer.patch | 16 + drm-intel-make-lvds-work.patch | 19 + drm-intel-next.patch | 1 + drm-next.patch | 44474 ++++++++++++++++ drm-nouveau-updates.patch | 5903 ++ drm-radeon-fix-shared-ddc-handling.patch | 36 + 
...rk-output-polling-to-be-back-in-core.patch | 958 + ethtool-fix-buffer-overflow.patch | 33 + find-provides | 44 + fix_xen_guest_on_old_EC2.patch | 34 + genkey | 7 + git-bluetooth.patch | 0 git-cpufreq.patch | 0 git-linus.diff | 0 hda_intel-prealloc-4mb-dmabuffer.patch | 47 + hdpvr-ir-enable.patch | 216 + i915-fix-crt-hotplug-regression.patch | 85 + inotify-fix-inotify-oneshot-support.patch | 25 + inotify-send-IN_UNMOUNT-events.patch | 29 + ...capability-id-checks-on-new-hardware.patch | 56 + iwlwifi-Recover-TX-flow-failure.patch | 162 + ...ver-TX-flow-stall-due-to-stuck-queue.patch | 504 + ...internal-short-scan-support-for-3945.patch | 90 + ...l-scan-watchdog-in-iwl_bg_abort_scan.patch | 58 + ...de-cleanup-for-connectivity-recovery.patch | 278 + ..._ack_health-only-apply-to-AGN-device.patch | 150 + iwlwifi-manage-QoS-by-mac-stack.patch | 361 + ...ove-plcp-check-to-separated-function.patch | 208 + iwlwifi-recover_from_tx_stall.patch | 12 + kbuild-fix-modpost-segfault.patch | 43 + kernel.spec | 2338 + ...lict-access-permissions-in-direct-sp.patch | 49 + linux-2.6-acpi-sleep-live-sci-live.patch | 51 + linux-2.6-acpi-video-dos.patch | 17 + linux-2.6-acpi-video-export-edid.patch | 199 + linux-2.6-build-nonintconfig.patch | 128 + linux-2.6-cantiga-iommu-gfx.patch | 26 + linux-2.6-compile-fixes.patch | 6 + linux-2.6-crash-driver.patch | 385 + linux-2.6-debug-always-inline-kzalloc.patch | 25 + linux-2.6-debug-nmi-timeout.patch | 45 + linux-2.6-debug-sizeof-structs.patch | 31 + linux-2.6-debug-taint-vm.patch | 65 + linux-2.6-debug-vm-would-have-oomkilled.patch | 64 + linux-2.6-defaults-acpi-video.patch | 13 + linux-2.6-defaults-aspm.patch | 12 + linux-2.6-defaults-pci_no_msi.patch | 110 + linux-2.6-driver-level-usb-autosuspend.diff | 69 + linux-2.6-enable-btusb-autosuspend.patch | 18 + linux-2.6-execshield.patch | 993 + linux-2.6-firewire-git-pending.patch | 0 linux-2.6-firewire-git-update.patch | 0 linux-2.6-fix-btusb-autosuspend.patch | 18 + linux-2.6-hotfixes.patch | 
13 + linux-2.6-input-hid-quirk-egalax.patch | 41 + linux-2.6-input-kill-stupid-messages.patch | 32 + linux-2.6-intel-iommu-igfx.patch | 78 + ...-mac80211-age-scan-results-on-resume.patch | 181 + linux-2.6-makefile-after_link.patch | 57 + linux-2.6-phylib-autoload.patch | 403 + linux-2.6-selinux-mprotect-checks.patch | 124 + linux-2.6-serial-460800.patch | 70 + linux-2.6-silence-acpi-blacklist.patch | 25 + linux-2.6-silence-fbcon-logo.patch | 42 + linux-2.6-silence-noise.patch | 66 + linux-2.6-sparc-selinux-mprotect-checks.patch | 35 + linux-2.6-tracehook.patch | 129 + linux-2.6-umh-refactor.patch | 404 + linux-2.6-upstream-reverts.patch | 953 + linux-2.6-usb-uvc-autosuspend.diff | 19 + linux-2.6-usb-wwan-update.patch | 1637 + linux-2.6-utrace-ptrace.patch | 1974 + linux-2.6-utrace.patch | 4163 ++ ...-2.6-v4l-dvb-add-kworld-a340-support.patch | 161 + linux-2.6-v4l-dvb-add-lgdt3304-support.patch | 350 + linux-2.6-v4l-dvb-experimental.patch | 0 linux-2.6-v4l-dvb-fixes.patch | 0 linux-2.6-v4l-dvb-gspca-fixes.patch | 3849 ++ linux-2.6-v4l-dvb-update.patch | 0 linux-2.6-v4l-dvb-uvcvideo-update.patch | 646 + linux-2.6-vio-modalias.patch | 61 + linux-2.6-x86-cfi_sections.patch | 60 + linux-2.6.29-sparc-IOC_TYPECHECK.patch | 21 + linux-2.6.30-no-pcspkr-modalias.patch | 11 + lirc-2.6.33.patch | 16918 ++++++ ...-do-not-wipe-out-old-supported-rates.patch | 71 + mac80211-explicitly-disable-enable-QoS.patch | 103 + ...es-IE-if-AP-doesnt-give-us-its-rates.patch | 66 + merge.pl | 66 + neuter_intel_microcode_load.patch | 24 + pci-acpi-disable-aspm-if-no-osc.patch | 53 + pci-aspm-dont-enable-too-early.patch | 50 + pci-change-error-messages-to-kern-info.patch | 43 + ...-back-to-original-bios-bar-addresses.patch | 103 + ...o-not-use-native-pcie-pme-by-default.patch | 87 + perf | 12 + prevent-runtime-conntrack-changes.patch | 74 + quiet-prove_RCU-in-cgroups.patch | 36 + ...rm-kms-toggle-poll-around-switcheroo.patch | 65 + sched-fix-over-scheduling-bug.patch | 60 + sources | 2 + 
ssb_check_for_sprom.patch | 155 + thinkpad-acpi-add-x100e.patch | 11 + thinkpad-acpi-fix-backlight.patch | 56 + usb-obey-the-sysfs-power-wakeup-setting.patch | 65 + virt_console-rollup.patch | 1031 + virtqueue-wrappers.patch | 651 + x86-debug-send-sigtrap-for-user-icebp.patch | 80 + 149 files changed, 104820 insertions(+) create mode 100644 Makefile.config create mode 100644 acpi-ec-add-delay-before-write.patch create mode 100644 acpi-pm-do-not-enable-gpes-for-system-wakeup-in-advance.patch create mode 100644 acpica-00-linux-2.6.git-0f849d2cc6863c7874889ea60a871fb71399dd3f.patch create mode 100644 acpica-01-linux-2.6.git-a997ab332832519c2e292db13f509e4360495a5a.patch create mode 100644 acpica-02-linux-2.6.git-e4e9a735991c80fb0fc1bd4a13a93681c3c17ce0.patch create mode 100644 acpica-03-linux-2.6.git-fd247447c1d94a79d5cfc647430784306b3a8323.patch create mode 100644 acpica-04-linux-2.6.git-c9a8bbb7704cbf515c0fc68970abbe4e91d68521.patch create mode 100644 acpica-05-linux-2.6.git-ce43ace02320a3fb9614ddb27edc3a8700d68b26.patch create mode 100644 acpica-06-linux-2.6.git-9d3c752de65dbfa6e522f1d666deb0ac152ef367.patch create mode 100644 add-appleir-usb-driver.patch create mode 100644 ata-generic-handle-new-mbp-with-mcp89.patch create mode 100644 ata-generic-implement-ata-gen-flags.patch create mode 100644 btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch create mode 100644 cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch create mode 100644 config-arm create mode 100644 config-debug create mode 100644 config-generic create mode 100644 config-i686-PAE create mode 100644 config-ia64-generic create mode 100644 config-nodebug create mode 100644 config-powerpc-generic create mode 100644 config-powerpc32-generic create mode 100644 config-powerpc32-smp create mode 100644 config-powerpc64 create mode 100644 config-rhel-generic create mode 100644 config-s390x create mode 100644 config-sparc64-generic create mode 100644 
config-x86-generic create mode 100644 config-x86_64-generic create mode 100644 coredump-uid-pipe-check.patch create mode 100644 crypto-add-async-hash-testing.patch create mode 100644 die-floppy-die.patch create mode 100644 disable-i8042-check-on-apple-mac.patch create mode 100644 drm-encoder-disable.patch create mode 100644 drm-i915-add-reclaimable-to-page-allocations.patch create mode 100644 drm-i915-fix-edp-panels.patch create mode 100644 drm-i915-fix-hibernate-memory-corruption.patch create mode 100644 drm-i915-make-G4X-style-PLL-search-more-permissive.patch create mode 100644 drm-intel-945gm-stability-fixes.patch create mode 100644 drm-intel-big-hammer.patch create mode 100644 drm-intel-make-lvds-work.patch create mode 100644 drm-intel-next.patch create mode 100644 drm-next.patch create mode 100644 drm-nouveau-updates.patch create mode 100644 drm-radeon-fix-shared-ddc-handling.patch create mode 100644 drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch create mode 100644 ethtool-fix-buffer-overflow.patch create mode 100755 find-provides create mode 100644 fix_xen_guest_on_old_EC2.patch create mode 100644 genkey create mode 100644 git-bluetooth.patch create mode 100644 git-cpufreq.patch create mode 100644 git-linus.diff create mode 100644 hda_intel-prealloc-4mb-dmabuffer.patch create mode 100644 hdpvr-ir-enable.patch create mode 100644 i915-fix-crt-hotplug-regression.patch create mode 100644 inotify-fix-inotify-oneshot-support.patch create mode 100644 inotify-send-IN_UNMOUNT-events.patch create mode 100644 input-synaptics-relax-capability-id-checks-on-new-hardware.patch create mode 100644 iwlwifi-Recover-TX-flow-failure.patch create mode 100644 iwlwifi-Recover-TX-flow-stall-due-to-stuck-queue.patch create mode 100644 iwlwifi-add-internal-short-scan-support-for-3945.patch create mode 100644 iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch create mode 100644 iwlwifi-code-cleanup-for-connectivity-recovery.patch create mode 100644 
iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch create mode 100644 iwlwifi-manage-QoS-by-mac-stack.patch create mode 100644 iwlwifi-move-plcp-check-to-separated-function.patch create mode 100644 iwlwifi-recover_from_tx_stall.patch create mode 100644 kbuild-fix-modpost-segfault.patch create mode 100644 kernel.spec create mode 100644 kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch create mode 100644 linux-2.6-acpi-sleep-live-sci-live.patch create mode 100644 linux-2.6-acpi-video-dos.patch create mode 100644 linux-2.6-acpi-video-export-edid.patch create mode 100644 linux-2.6-build-nonintconfig.patch create mode 100644 linux-2.6-cantiga-iommu-gfx.patch create mode 100644 linux-2.6-compile-fixes.patch create mode 100644 linux-2.6-crash-driver.patch create mode 100644 linux-2.6-debug-always-inline-kzalloc.patch create mode 100644 linux-2.6-debug-nmi-timeout.patch create mode 100644 linux-2.6-debug-sizeof-structs.patch create mode 100644 linux-2.6-debug-taint-vm.patch create mode 100644 linux-2.6-debug-vm-would-have-oomkilled.patch create mode 100644 linux-2.6-defaults-acpi-video.patch create mode 100644 linux-2.6-defaults-aspm.patch create mode 100644 linux-2.6-defaults-pci_no_msi.patch create mode 100644 linux-2.6-driver-level-usb-autosuspend.diff create mode 100644 linux-2.6-enable-btusb-autosuspend.patch create mode 100644 linux-2.6-execshield.patch create mode 100644 linux-2.6-firewire-git-pending.patch create mode 100644 linux-2.6-firewire-git-update.patch create mode 100644 linux-2.6-fix-btusb-autosuspend.patch create mode 100644 linux-2.6-hotfixes.patch create mode 100644 linux-2.6-input-hid-quirk-egalax.patch create mode 100644 linux-2.6-input-kill-stupid-messages.patch create mode 100644 linux-2.6-intel-iommu-igfx.patch create mode 100644 linux-2.6-mac80211-age-scan-results-on-resume.patch create mode 100644 linux-2.6-makefile-after_link.patch create mode 100644 linux-2.6-phylib-autoload.patch create mode 100644 
linux-2.6-selinux-mprotect-checks.patch create mode 100644 linux-2.6-serial-460800.patch create mode 100644 linux-2.6-silence-acpi-blacklist.patch create mode 100644 linux-2.6-silence-fbcon-logo.patch create mode 100644 linux-2.6-silence-noise.patch create mode 100644 linux-2.6-sparc-selinux-mprotect-checks.patch create mode 100644 linux-2.6-tracehook.patch create mode 100644 linux-2.6-umh-refactor.patch create mode 100644 linux-2.6-upstream-reverts.patch create mode 100644 linux-2.6-usb-uvc-autosuspend.diff create mode 100644 linux-2.6-usb-wwan-update.patch create mode 100644 linux-2.6-utrace-ptrace.patch create mode 100644 linux-2.6-utrace.patch create mode 100644 linux-2.6-v4l-dvb-add-kworld-a340-support.patch create mode 100644 linux-2.6-v4l-dvb-add-lgdt3304-support.patch create mode 100644 linux-2.6-v4l-dvb-experimental.patch create mode 100644 linux-2.6-v4l-dvb-fixes.patch create mode 100644 linux-2.6-v4l-dvb-gspca-fixes.patch create mode 100644 linux-2.6-v4l-dvb-update.patch create mode 100644 linux-2.6-v4l-dvb-uvcvideo-update.patch create mode 100644 linux-2.6-vio-modalias.patch create mode 100644 linux-2.6-x86-cfi_sections.patch create mode 100644 linux-2.6.29-sparc-IOC_TYPECHECK.patch create mode 100644 linux-2.6.30-no-pcspkr-modalias.patch create mode 100644 lirc-2.6.33.patch create mode 100644 mac80211-do-not-wipe-out-old-supported-rates.patch create mode 100644 mac80211-explicitly-disable-enable-QoS.patch create mode 100644 mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch create mode 100755 merge.pl create mode 100644 neuter_intel_microcode_load.patch create mode 100644 pci-acpi-disable-aspm-if-no-osc.patch create mode 100644 pci-aspm-dont-enable-too-early.patch create mode 100644 pci-change-error-messages-to-kern-info.patch create mode 100644 pci-fall-back-to-original-bios-bar-addresses.patch create mode 100644 pci-pm-do-not-use-native-pcie-pme-by-default.patch create mode 100644 perf create mode 100644 
prevent-runtime-conntrack-changes.patch create mode 100644 quiet-prove_RCU-in-cgroups.patch create mode 100644 revert-drm-kms-toggle-poll-around-switcheroo.patch create mode 100644 sched-fix-over-scheduling-bug.patch create mode 100644 ssb_check_for_sprom.patch create mode 100644 thinkpad-acpi-add-x100e.patch create mode 100644 thinkpad-acpi-fix-backlight.patch create mode 100644 usb-obey-the-sysfs-power-wakeup-setting.patch create mode 100644 virt_console-rollup.patch create mode 100644 virtqueue-wrappers.patch create mode 100644 x86-debug-send-sigtrap-for-user-icebp.patch diff --git a/.gitignore b/.gitignore index e69de29bb..525e436c9 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,2 @@ +linux-2.6.34.tar.bz2 +patch-2.6.34.1.bz2 diff --git a/Makefile.config b/Makefile.config new file mode 100644 index 000000000..53812fa9b --- /dev/null +++ b/Makefile.config @@ -0,0 +1,105 @@ +# Make rules for configuration files. +# +# $Id$ + +CFG = kernel-$(VERSION) + +CONFIGFILES = \ + $(CFG)-i686.config $(CFG)-i686-debug.config \ + $(CFG)-i686-PAE.config $(CFG)-i686-PAEdebug.config \ + $(CFG)-x86_64.config $(CFG)-x86_64-debug.config \ + $(CFG)-s390x.config $(CFG)-arm.config \ + $(CFG)-ppc.config $(CFG)-ppc-smp.config \ + $(CFG)-sparc64.config \ + $(CFG)-ppc64.config $(CFG)-ppc64-debug.config \ + $(CFG)-ia64.config + +PLATFORMS = x86 x86_64 powerpc powerpc32 powerpc64 s390x ia64 sparc64 +TEMPFILES = $(addprefix temp-, $(addsuffix -generic, $(PLATFORMS))) + +configs: $(CONFIGFILES) + @rm -f kernel-*-config + @rm -f $(TEMPFILES) + @rm -f temp-generic temp-*-generic temp-*-generic-tmp + +# Augment the clean target to clean up our own cruft +clean :: + @rm -fv $(CONFIGFILES) $(TEMPFILES) temp-generic kernel-$(VERSION)*config + +temp-generic: config-generic + cat config-generic config-nodebug > temp-generic + +temp-debug-generic: config-generic + cat config-generic config-debug > temp-debug-generic + +temp-x86-generic: config-x86-generic temp-generic + perl merge.pl $^ > $@ + 
+temp-x86-debug-generic: config-x86-generic temp-debug-generic + perl merge.pl $^ > $@ + +temp-x86_64-generic: config-x86_64-generic temp-generic + perl merge.pl $^ > $@ + +temp-x86_64-debug-generic: config-x86_64-generic temp-debug-generic + perl merge.pl $^ > $@ + +temp-sparc64-generic: config-sparc64-generic temp-generic + perl merge.pl $^ > $@ + +temp-powerpc-generic: config-powerpc-generic temp-generic + perl merge.pl $^ > $@ + +temp-powerpc-debug-generic: config-powerpc-generic temp-debug-generic + perl merge.pl $^ > $@ + +temp-powerpc32-generic: config-powerpc32-generic temp-powerpc-generic + perl merge.pl $^ > $@ + +temp-s390-generic: config-s390x temp-generic + perl merge.pl $^ > $@ + +temp-ia64-generic: config-ia64-generic temp-generic + perl merge.pl $^ > $@ + +kernel-$(VERSION)-i686-PAE.config: config-i686-PAE temp-x86-generic + perl merge.pl $^ i386 > $@ + +kernel-$(VERSION)-i686-PAEdebug.config: config-i686-PAE temp-x86-debug-generic + perl merge.pl $^ i386 > $@ + +kernel-$(VERSION)-i686.config: /dev/null temp-x86-generic + perl merge.pl $^ i386 > $@ + +kernel-$(VERSION)-i686-debug.config: /dev/null temp-x86-debug-generic + perl merge.pl $^ i386 > $@ + +kernel-$(VERSION)-x86_64.config: /dev/null temp-x86_64-generic + perl merge.pl $^ x86_64 > $@ + +kernel-$(VERSION)-x86_64-debug.config: /dev/null temp-x86_64-debug-generic + perl merge.pl $^ x86_64 > $@ + +kernel-$(VERSION)-sparc64.config: /dev/null temp-sparc64-generic + perl merge.pl $^ sparc64 > $@ + +kernel-$(VERSION)-ppc64.config: config-powerpc64 temp-powerpc-generic + perl merge.pl $^ powerpc > $@ + +kernel-$(VERSION)-ppc64-debug.config: config-powerpc64 temp-powerpc-debug-generic + perl merge.pl $^ powerpc > $@ + +kernel-$(VERSION)-s390x.config: config-s390x temp-s390-generic + perl merge.pl $^ s390 > $@ + +kernel-$(VERSION)-arm.config: config-arm temp-generic + perl merge.pl $^ arm > $@ + +kernel-$(VERSION)-ppc.config: /dev/null temp-powerpc32-generic + perl merge.pl $^ powerpc > $@ + 
+kernel-$(VERSION)-ppc-smp.config: config-powerpc32-smp temp-powerpc32-generic + perl merge.pl $^ powerpc > $@ + +kernel-$(VERSION)-ia64.config: /dev/null temp-ia64-generic + perl merge.pl $^ ia64 > $@ diff --git a/acpi-ec-add-delay-before-write.patch b/acpi-ec-add-delay-before-write.patch new file mode 100644 index 000000000..af49cccbd --- /dev/null +++ b/acpi-ec-add-delay-before-write.patch @@ -0,0 +1,52 @@ +https://bugzilla.kernel.org/show_bug.cgi?id=14733#c41 + +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 27e0b92..09fbb69 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -226,6 +226,7 @@ static int ec_poll(struct acpi_ec *ec) + if (ec_transaction_done(ec)) + return 0; + } else { ++ msleep(1); + if (wait_event_timeout(ec->wait, + ec_transaction_done(ec), + msecs_to_jiffies(1))) +@@ -233,8 +234,8 @@ static int ec_poll(struct acpi_ec *ec) + } + advance_transaction(ec, acpi_ec_read_status(ec)); + } while (time_before(jiffies, delay)); +- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) +- break; ++// if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) ++// break; + pr_debug(PREFIX "controller reset, restart transaction\n"); + spin_lock_irqsave(&ec->curr_lock, flags); + start_transaction(ec); +@@ -271,15 +272,25 @@ static int ec_check_ibf0(struct acpi_ec *ec) + return (status & ACPI_EC_FLAG_IBF) == 0; + } + ++/* try to clean input buffer with burst_disable transaction */ ++static int acpi_ec_clean_buffer(struct acpi_ec *ec) ++{ ++ struct transaction t = {.command = ACPI_EC_BURST_DISABLE, ++ .wdata = NULL, .rdata = NULL, ++ .wlen = 0, .rlen = 0}; ++ return acpi_ec_transaction_unlocked(ec, &t); ++} ++ + static int ec_wait_ibf0(struct acpi_ec *ec) + { ++ + unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY); + /* interrupt wait manually if GPE mode is not active */ + while (time_before(jiffies, delay)) + if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), + msecs_to_jiffies(1))) + return 0; +- return -ETIME; ++ return 
acpi_ec_clean_buffer(ec); + } + + static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) diff --git a/acpi-pm-do-not-enable-gpes-for-system-wakeup-in-advance.patch b/acpi-pm-do-not-enable-gpes-for-system-wakeup-in-advance.patch new file mode 100644 index 000000000..8ea7974ae --- /dev/null +++ b/acpi-pm-do-not-enable-gpes-for-system-wakeup-in-advance.patch @@ -0,0 +1,106 @@ +From: Rafael J. Wysocki +Date: Thu, 17 Jun 2010 15:40:57 +0000 (+0200) +Subject: ACPI / PM: Do not enable GPEs for system wakeup in advance +X-Git-Tag: v2.6.35-rc4~72^2^2 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=cb1cb1780f2025a7d612de09131bf6530f80fb1a + +ACPI / PM: Do not enable GPEs for system wakeup in advance + +After commit 9630bdd9b15d2f489c646d8bc04b60e53eb5ec78 +(ACPI: Use GPE reference counting to support shared GPEs) the wakeup +enable mask bits of GPEs are set as soon as the GPEs are enabled to +wake up the system. Unfortunately, this leads to a regression +reported by Michal Hocko, where a system is woken up from ACPI S5 by +a device that is not supposed to do that, because the wakeup enable +mask bit of this device's GPE is always set when +acpi_enter_sleep_state() calls acpi_hw_enable_all_wakeup_gpes(), +although it should only be set if the device is supposed to wake up +the system from the target state. + +To work around this issue, rework the ACPI power management code so +that GPEs are not enabled to wake up the system upfront, but only +during a system state transition when the target state of the system +is known. [Of course, this means that the reference counting of +"wakeup" GPEs doesn't really make sense and it is sufficient to +set/unset the wakeup mask bits for them during system sleep +transitions. This will allow us to simplify the GPE handling code +quite a bit, but that change is too intrusive for 2.6.35.] 
+ +Fixes https://bugzilla.kernel.org/show_bug.cgi?id=15951 + +Signed-off-by: Rafael J. Wysocki +Reported-and-tested-by: Michal Hocko +Signed-off-by: Len Brown +--- + +diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c +index fd51c4a..7d857da 100644 +--- a/drivers/acpi/button.c ++++ b/drivers/acpi/button.c +@@ -425,7 +425,7 @@ static int acpi_button_add(struct acpi_device *device) + /* Button's GPE is run-wake GPE */ + acpi_enable_gpe(device->wakeup.gpe_device, + device->wakeup.gpe_number, +- ACPI_GPE_TYPE_WAKE_RUN); ++ ACPI_GPE_TYPE_RUNTIME); + device->wakeup.run_wake_count++; + device->wakeup.state.enabled = 1; + } +@@ -449,7 +449,7 @@ static int acpi_button_remove(struct acpi_device *device, int type) + if (device->wakeup.flags.valid) { + acpi_disable_gpe(device->wakeup.gpe_device, + device->wakeup.gpe_number, +- ACPI_GPE_TYPE_WAKE_RUN); ++ ACPI_GPE_TYPE_RUNTIME); + device->wakeup.run_wake_count--; + device->wakeup.state.enabled = 0; + } +diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c +index 4b9d339..388747a 100644 +--- a/drivers/acpi/wakeup.c ++++ b/drivers/acpi/wakeup.c +@@ -64,16 +64,13 @@ void acpi_enable_wakeup_device(u8 sleep_state) + struct acpi_device *dev = + container_of(node, struct acpi_device, wakeup_list); + +- if (!dev->wakeup.flags.valid) +- continue; +- +- if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) ++ if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled + || sleep_state > (u32) dev->wakeup.sleep_state) + continue; + + /* The wake-up power should have been enabled already. 
*/ +- acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, +- ACPI_GPE_ENABLE); ++ acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ++ ACPI_GPE_TYPE_WAKE); + } + } + +@@ -96,6 +93,8 @@ void acpi_disable_wakeup_device(u8 sleep_state) + || (sleep_state > (u32) dev->wakeup.sleep_state)) + continue; + ++ acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ++ ACPI_GPE_TYPE_WAKE); + acpi_disable_wakeup_device_power(dev); + } + } +@@ -109,13 +108,8 @@ int __init acpi_wakeup_device_init(void) + struct acpi_device *dev = container_of(node, + struct acpi_device, + wakeup_list); +- /* In case user doesn't load button driver */ +- if (!dev->wakeup.flags.always_enabled || +- dev->wakeup.state.enabled) +- continue; +- acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, +- ACPI_GPE_TYPE_WAKE); +- dev->wakeup.state.enabled = 1; ++ if (dev->wakeup.flags.always_enabled) ++ dev->wakeup.state.enabled = 1; + } + mutex_unlock(&acpi_device_lock); + return 0; diff --git a/acpica-00-linux-2.6.git-0f849d2cc6863c7874889ea60a871fb71399dd3f.patch b/acpica-00-linux-2.6.git-0f849d2cc6863c7874889ea60a871fb71399dd3f.patch new file mode 100644 index 000000000..c130dfed4 --- /dev/null +++ b/acpica-00-linux-2.6.git-0f849d2cc6863c7874889ea60a871fb71399dd3f.patch @@ -0,0 +1,1285 @@ +From: Lin Ming +Date: Tue, 6 Apr 2010 06:52:37 +0000 (+0800) +Subject: ACPICA: Minimize the differences between linux GPE code and ACPICA code base +X-Git-Tag: v2.6.35-rc1~477^2~9 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=0f849d2cc6863c7874889ea60a871fb71399dd3f + +ACPICA: Minimize the differences between linux GPE code and ACPICA code base + +[ trivial backport to 2.6.34 ] + +We have ported Rafael's major GPE changes +(ACPI: Use GPE reference counting to support shared GPEs) into ACPICA code base. 
+But the port and Rafael's original patch have some differences, so we made +below patch to make linux GPE code consistent with ACPICA code base. + +Most changes are about comments and coding styles. +Other noticeable changes are based on: + +Rafael: Reduce code duplication related to GPE lookup +https://patchwork.kernel.org/patch/86237/ + +Rafael: Always use the same lock for GPE locking +https://patchwork.kernel.org/patch/90471/ + +A new field gpe_count in struct acpi_gpe_block_info to record the number +of individual GPEs in block. + +Rename acpi_ev_save_method_info to acpi_ev_match_gpe_method. + +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Robert Moore +Signed-off-by: Lin Ming +Signed-off-by: Len Brown +--- + +diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h +index 3e6ba99..5e094a2 100644 +--- a/drivers/acpi/acpica/acevents.h ++++ b/drivers/acpi/acpica/acevents.h +@@ -85,6 +85,10 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); + struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, + u32 gpe_number); + ++struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, ++ struct acpi_gpe_block_info ++ *gpe_block); ++ + /* + * evgpeblk + */ +@@ -118,9 +122,6 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, + + u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list); + +-acpi_status +-acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info); +- + acpi_status acpi_ev_gpe_initialize(void); + + /* +diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h +index 24b8faa..5a6203a 100644 +--- a/drivers/acpi/acpica/aclocal.h ++++ b/drivers/acpi/acpica/aclocal.h +@@ -427,8 +427,8 @@ struct acpi_gpe_event_info { + struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ + u8 flags; /* Misc info about this GPE */ + u8 gpe_number; /* This GPE */ +- u8 runtime_count; +- u8 wakeup_count; ++ u8 
runtime_count; /* References to a run GPE */ ++ u8 wakeup_count; /* References to a wake GPE */ + }; + + /* Information about a GPE register pair, one per each status/enable pair in an array */ +@@ -454,6 +454,7 @@ struct acpi_gpe_block_info { + struct acpi_gpe_event_info *event_info; /* One for each GPE */ + struct acpi_generic_address block_address; /* Base address of the block */ + u32 register_count; /* Number of register pairs in block */ ++ u16 gpe_count; /* Number of individual GPEs in block */ + u8 block_base_number; /* Base GPE number for this block */ + }; + +diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c +index b9d50ef..deb26f4 100644 +--- a/drivers/acpi/acpica/evgpe.c ++++ b/drivers/acpi/acpica/evgpe.c +@@ -60,7 +60,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); + * + * RETURN: Status + * +- * DESCRIPTION: Updates GPE register enable masks based on the GPE type ++ * DESCRIPTION: Updates GPE register enable masks based upon whether there are ++ * references (either wake or run) to this GPE + * + ******************************************************************************/ + +@@ -81,14 +82,20 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info) + (1 << + (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); + ++ /* Clear the wake/run bits up front */ ++ + ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit); + ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); + +- if (gpe_event_info->runtime_count) ++ /* Set the mask bits only if there are references to this GPE */ ++ ++ if (gpe_event_info->runtime_count) { + ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); ++ } + +- if (gpe_event_info->wakeup_count) ++ if (gpe_event_info->wakeup_count) { + ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); ++ } + + return_ACPI_STATUS(AE_OK); + } +@@ -101,7 +108,10 @@ acpi_ev_update_gpe_enable_masks(struct 
acpi_gpe_event_info *gpe_event_info) + * + * RETURN: Status + * +- * DESCRIPTION: Enable a GPE based on the GPE type ++ * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless ++ * of type or number of references. ++ * ++ * Note: The GPE lock should be already acquired when this function is called. + * + ******************************************************************************/ + +@@ -109,20 +119,36 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) + { + acpi_status status; + ++ + ACPI_FUNCTION_TRACE(ev_enable_gpe); + +- /* Make sure HW enable masks are updated */ ++ ++ /* ++ * We will only allow a GPE to be enabled if it has either an ++ * associated method (_Lxx/_Exx) or a handler. Otherwise, the ++ * GPE will be immediately disabled by acpi_ev_gpe_dispatch the ++ * first time it fires. ++ */ ++ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { ++ return_ACPI_STATUS(AE_NO_HANDLER); ++ } ++ ++ /* Ensure the HW enable masks are current */ + + status = acpi_ev_update_gpe_enable_masks(gpe_event_info); +- if (ACPI_FAILURE(status)) ++ if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); ++ } ++ ++ /* Clear the GPE (of stale events) */ + +- /* Clear the GPE (of stale events), then enable it */ + status = acpi_hw_clear_gpe(gpe_event_info); +- if (ACPI_FAILURE(status)) ++ if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); ++ } + + /* Enable the requested GPE */ ++ + status = acpi_hw_write_gpe_enable_reg(gpe_event_info); + return_ACPI_STATUS(status); + } +@@ -135,7 +161,10 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) + * + * RETURN: Status + * +- * DESCRIPTION: Disable a GPE based on the GPE type ++ * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE, ++ * regardless of the type or number of references. ++ * ++ * Note: The GPE lock should be already acquired when this function is called. 
+ * + ******************************************************************************/ + +@@ -145,24 +174,71 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) + + ACPI_FUNCTION_TRACE(ev_disable_gpe); + +- /* Make sure HW enable masks are updated */ ++ ++ /* ++ * Note: Always disable the GPE, even if we think that that it is already ++ * disabled. It is possible that the AML or some other code has enabled ++ * the GPE behind our back. ++ */ ++ ++ /* Ensure the HW enable masks are current */ + + status = acpi_ev_update_gpe_enable_masks(gpe_event_info); +- if (ACPI_FAILURE(status)) ++ if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); ++ } + + /* +- * Even if we don't know the GPE type, make sure that we always +- * disable it. low_disable_gpe will just clear the enable bit for this +- * GPE and write it. It will not write out the current GPE enable mask, +- * since this may inadvertently enable GPEs too early, if a rogue GPE has +- * come in during ACPICA initialization - possibly as a result of AML or +- * other code that has enabled the GPE. ++ * Always H/W disable this GPE, even if we don't know the GPE type. ++ * Simply clear the enable bit for this particular GPE, but do not ++ * write out the current GPE enable mask since this may inadvertently ++ * enable GPEs too early. An example is a rogue GPE that has arrived ++ * during ACPICA initialization - possibly because AML or other code ++ * has enabled the GPE. + */ + status = acpi_hw_low_disable_gpe(gpe_event_info); + return_ACPI_STATUS(status); + } + ++ ++/******************************************************************************* ++ * ++ * FUNCTION: acpi_ev_low_get_gpe_info ++ * ++ * PARAMETERS: gpe_number - Raw GPE number ++ * gpe_block - A GPE info block ++ * ++ * RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number ++ * is not within the specified GPE block) ++ * ++ * DESCRIPTION: Returns the event_info struct associated with this GPE. 
This is ++ * the low-level implementation of ev_get_gpe_event_info. ++ * ++ ******************************************************************************/ ++ ++struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, ++ struct acpi_gpe_block_info ++ *gpe_block) ++{ ++ u32 gpe_index; ++ ++ /* ++ * Validate that the gpe_number is within the specified gpe_block. ++ * (Two steps) ++ */ ++ if (!gpe_block || (gpe_number < gpe_block->block_base_number)) { ++ return (NULL); ++ } ++ ++ gpe_index = gpe_number - gpe_block->block_base_number; ++ if (gpe_index >= gpe_block->gpe_count) { ++ return (NULL); ++ } ++ ++ return (&gpe_block->event_info[gpe_index]); ++} ++ ++ + /******************************************************************************* + * + * FUNCTION: acpi_ev_get_gpe_event_info +@@ -184,7 +260,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, + u32 gpe_number) + { + union acpi_operand_object *obj_desc; +- struct acpi_gpe_block_info *gpe_block; ++ struct acpi_gpe_event_info *gpe_info; + u32 i; + + ACPI_FUNCTION_ENTRY(); +@@ -196,17 +272,11 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, + /* Examine GPE Block 0 and 1 (These blocks are permanent) */ + + for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { +- gpe_block = acpi_gbl_gpe_fadt_blocks[i]; +- if (gpe_block) { +- if ((gpe_number >= gpe_block->block_base_number) +- && (gpe_number < +- gpe_block->block_base_number + +- (gpe_block->register_count * 8))) { +- return (&gpe_block-> +- event_info[gpe_number - +- gpe_block-> +- block_base_number]); +- } ++ gpe_info = acpi_ev_low_get_gpe_info(gpe_number, ++ acpi_gbl_gpe_fadt_blocks ++ [i]); ++ if (gpe_info) { ++ return (gpe_info); + } + } + +@@ -223,16 +293,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, + return (NULL); + } + +- gpe_block = obj_desc->device.gpe_block; +- +- if ((gpe_number >= gpe_block->block_base_number) && +- (gpe_number < +- 
gpe_block->block_base_number + (gpe_block->register_count * 8))) { +- return (&gpe_block-> +- event_info[gpe_number - gpe_block->block_base_number]); +- } +- +- return (NULL); ++ return (acpi_ev_low_get_gpe_info ++ (gpe_number, obj_desc->device.gpe_block)); + } + + /******************************************************************************* +@@ -389,7 +451,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) + return_VOID; + } + +- /* Set the GPE flags for return to enabled state */ ++ /* Update the GPE register masks for return to enabled state */ + + (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); + +@@ -569,15 +631,18 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) + + default: + +- /* No handler or method to run! */ +- ++ /* ++ * No handler or method to run! ++ * 03/2010: This case should no longer be possible. We will not allow ++ * a GPE to be enabled if it has no handler or method. ++ */ + ACPI_ERROR((AE_INFO, + "No handler or method for GPE[%2X], disabling event", + gpe_number)); + + /* +- * Disable the GPE. The GPE will remain disabled until the ACPICA +- * Core Subsystem is restarted, or a handler is installed. ++ * Disable the GPE. The GPE will remain disabled a handler ++ * is installed or ACPICA is restarted. 
+ */ + status = acpi_ev_disable_gpe(gpe_event_info); + if (ACPI_FAILURE(status)) { +diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c +index fa47e35..85ded1f 100644 +--- a/drivers/acpi/acpica/evgpeblk.c ++++ b/drivers/acpi/acpica/evgpeblk.c +@@ -51,7 +51,7 @@ ACPI_MODULE_NAME("evgpeblk") + + /* Local prototypes */ + static acpi_status +-acpi_ev_save_method_info(acpi_handle obj_handle, ++acpi_ev_match_gpe_method(acpi_handle obj_handle, + u32 level, void *obj_desc, void **return_value); + + static acpi_status +@@ -104,9 +104,7 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) + + while (gpe_block) { + if ((&gpe_block->event_info[0] <= gpe_event_info) && +- (&gpe_block->event_info[((acpi_size) +- gpe_block-> +- register_count) * 8] > ++ (&gpe_block->event_info[gpe_block->gpe_count] > + gpe_event_info)) { + return (TRUE); + } +@@ -229,7 +227,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + + /******************************************************************************* + * +- * FUNCTION: acpi_ev_save_method_info ++ * FUNCTION: acpi_ev_match_gpe_method + * + * PARAMETERS: Callback from walk_namespace + * +@@ -241,8 +239,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + * information for quick lookup during GPE dispatch + * + * The name of each GPE control method is of the form: +- * "_Lxx" or "_Exx" +- * Where: ++ * "_Lxx" or "_Exx", where: + * L - means that the GPE is level triggered + * E - means that the GPE is edge triggered + * xx - is the GPE number [in HEX] +@@ -250,9 +247,11 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + ******************************************************************************/ + + static acpi_status +-acpi_ev_save_method_info(acpi_handle obj_handle, ++acpi_ev_match_gpe_method(acpi_handle obj_handle, + u32 level, void *obj_desc, void **return_value) + { ++ struct acpi_namespace_node *method_node = ++ 
ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle); + struct acpi_gpe_block_info *gpe_block = (void *)obj_desc; + struct acpi_gpe_event_info *gpe_event_info; + u32 gpe_number; +@@ -262,21 +261,25 @@ acpi_ev_save_method_info(acpi_handle obj_handle, + ACPI_FUNCTION_TRACE(ev_save_method_info); + + /* +- * _Lxx and _Exx GPE method support ++ * Match and decode the _Lxx and _Exx GPE method names + * +- * 1) Extract the name from the object and convert to a string ++ * 1) Extract the method name and null terminate it + */ +- ACPI_MOVE_32_TO_32(name, +- &((struct acpi_namespace_node *)obj_handle)->name. +- integer); ++ ACPI_MOVE_32_TO_32(name, &method_node->name.integer); + name[ACPI_NAME_SIZE] = 0; + ++ /* 2) Name must begin with an underscore */ ++ ++ if (name[0] != '_') { ++ return_ACPI_STATUS(AE_OK); /* Ignore this method */ ++ } ++ + /* +- * 2) Edge/Level determination is based on the 2nd character ++ * 3) Edge/Level determination is based on the 2nd character + * of the method name + * +- * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE +- * if a _PRW object is found that points to this GPE. ++ * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is ++ * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set. + */ + switch (name[1]) { + case 'L': +@@ -288,7 +291,7 @@ acpi_ev_save_method_info(acpi_handle obj_handle, + break; + + default: +- /* Unknown method type, just ignore it! 
*/ ++ /* Unknown method type, just ignore it */ + + ACPI_DEBUG_PRINT((ACPI_DB_LOAD, + "Ignoring unknown GPE method type: %s " +@@ -296,7 +299,7 @@ acpi_ev_save_method_info(acpi_handle obj_handle, + return_ACPI_STATUS(AE_OK); + } + +- /* Convert the last two characters of the name to the GPE Number */ ++ /* 4) The last two characters of the name are the hex GPE Number */ + + gpe_number = ACPI_STRTOUL(&name[2], NULL, 16); + if (gpe_number == ACPI_UINT32_MAX) { +@@ -311,28 +314,22 @@ acpi_ev_save_method_info(acpi_handle obj_handle, + + /* Ensure that we have a valid GPE number for this GPE block */ + +- if ((gpe_number < gpe_block->block_base_number) || +- (gpe_number >= (gpe_block->block_base_number + +- (gpe_block->register_count * 8)))) { ++ gpe_event_info = acpi_ev_low_get_gpe_info(gpe_number, gpe_block); ++ if (!gpe_event_info) { + /* +- * Not valid for this GPE block, just ignore it. However, it may be +- * valid for a different GPE block, since GPE0 and GPE1 methods both +- * appear under \_GPE. ++ * This gpe_number is not valid for this GPE block, just ignore it. ++ * However, it may be valid for a different GPE block, since GPE0 ++ * and GPE1 methods both appear under \_GPE. + */ + return_ACPI_STATUS(AE_OK); + } + + /* +- * Now we can add this information to the gpe_event_info block for use +- * during dispatch of this GPE. ++ * Add the GPE information from above to the gpe_event_info block for ++ * use during dispatch of this GPE. 
+ */ +- gpe_event_info = +- &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; +- +- gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD); +- +- gpe_event_info->dispatch.method_node = +- (struct acpi_namespace_node *)obj_handle; ++ gpe_event_info->flags = (u8)(type | ACPI_GPE_DISPATCH_METHOD); ++ gpe_event_info->dispatch.method_node = method_node; + + ACPI_DEBUG_PRINT((ACPI_DB_LOAD, + "Registered GPE method %s as GPE number 0x%.2X\n", +@@ -351,7 +348,7 @@ acpi_ev_save_method_info(acpi_handle obj_handle, + * + * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a + * Device. Run the _PRW method. If present, extract the GPE +- * number and mark the GPE as a WAKE GPE. ++ * number and mark the GPE as a CAN_WAKE GPE. + * + ******************************************************************************/ + +@@ -377,7 +374,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, + ACPI_BTYPE_PACKAGE, &pkg_desc); + if (ACPI_FAILURE(status)) { + +- /* Ignore all errors from _PRW, we don't want to abort the subsystem */ ++ /* Ignore all errors from _PRW, we don't want to abort the walk */ + + return_ACPI_STATUS(AE_OK); + } +@@ -439,13 +436,13 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, + * 2) The GPE index(number) is within the range of the Gpe Block + * associated with the GPE device. 
+ */ +- if ((gpe_device == target_gpe_device) && +- (gpe_number >= gpe_block->block_base_number) && +- (gpe_number < gpe_block->block_base_number + +- (gpe_block->register_count * 8))) { +- gpe_event_info = &gpe_block->event_info[gpe_number - +- gpe_block-> +- block_base_number]; ++ if (gpe_device != target_gpe_device) { ++ goto cleanup; ++ } ++ ++ gpe_event_info = acpi_ev_low_get_gpe_info(gpe_number, gpe_block); ++ if (gpe_event_info) { ++ /* This GPE can wake the system */ + + gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; + } +@@ -705,8 +702,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + } + +- acpi_current_gpe_count -= +- gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH; ++ acpi_current_gpe_count -= gpe_block->gpe_count; + + /* Free the gpe_block */ + +@@ -760,9 +756,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) + * Allocate the GPE event_info block. There are eight distinct GPEs + * per register. Initialization to zeros is sufficient. 
+ */ +- gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block-> +- register_count * +- ACPI_GPE_REGISTER_WIDTH) * ++ gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count * + sizeof(struct + acpi_gpe_event_info)); + if (!gpe_event_info) { +@@ -897,6 +891,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, + /* Initialize the new GPE block */ + + gpe_block->node = gpe_device; ++ gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); + gpe_block->register_count = register_count; + gpe_block->block_base_number = gpe_block_base_number; + +@@ -925,7 +920,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, + + status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device, + ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, +- acpi_ev_save_method_info, NULL, ++ acpi_ev_match_gpe_method, NULL, + gpe_block, NULL); + + /* Return the new block */ +@@ -938,14 +933,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, + "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n", + (u32) gpe_block->block_base_number, + (u32) (gpe_block->block_base_number + +- ((gpe_block->register_count * +- ACPI_GPE_REGISTER_WIDTH) - 1)), ++ (gpe_block->gpe_count - 1)), + gpe_device->name.ascii, gpe_block->register_count, + interrupt_number)); + + /* Update global count of currently available GPEs */ + +- acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH; ++ acpi_current_gpe_count += gpe_block->gpe_count; + return_ACPI_STATUS(AE_OK); + } + +@@ -969,10 +963,13 @@ acpi_status + acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, + struct acpi_gpe_block_info *gpe_block) + { ++ acpi_status status; + struct acpi_gpe_event_info *gpe_event_info; + struct acpi_gpe_walk_info gpe_info; + u32 wake_gpe_count; + u32 gpe_enabled_count; ++ u32 gpe_index; ++ u32 gpe_number; + u32 i; + u32 j; + +@@ -998,50 +995,62 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, + gpe_info.gpe_block 
= gpe_block; + gpe_info.gpe_device = gpe_device; + +- acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ++ status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, + acpi_ev_match_prw_and_gpe, NULL, + &gpe_info, NULL); ++ if (ACPI_FAILURE(status)) { ++ ACPI_EXCEPTION((AE_INFO, status, ++ "While executing _PRW methods")); ++ } + } + + /* +- * Enable all GPEs that have a corresponding method and aren't ++ * Enable all GPEs that have a corresponding method and are not + * capable of generating wakeups. Any other GPEs within this block +- * must be enabled via the acpi_enable_gpe() interface. ++ * must be enabled via the acpi_enable_gpe interface. + */ + wake_gpe_count = 0; + gpe_enabled_count = 0; +- if (gpe_device == acpi_gbl_fadt_gpe_device) ++ ++ if (gpe_device == acpi_gbl_fadt_gpe_device) { + gpe_device = NULL; ++ } + + for (i = 0; i < gpe_block->register_count; i++) { + for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { +- acpi_status status; +- acpi_size gpe_index; +- int gpe_number; + + /* Get the info block for this particular GPE */ +- gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j; ++ ++ gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; + gpe_event_info = &gpe_block->event_info[gpe_index]; + + if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) { + wake_gpe_count++; +- if (acpi_gbl_leave_wake_gpes_disabled) ++ if (acpi_gbl_leave_wake_gpes_disabled) { + continue; ++ } + } + +- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) ++ /* Ignore GPEs that have no corresponding _Lxx/_Exx method */ ++ ++ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) { + continue; ++ } ++ ++ /* Enable this GPE */ + + gpe_number = gpe_index + gpe_block->block_base_number; + status = acpi_enable_gpe(gpe_device, gpe_number, +- ACPI_GPE_TYPE_RUNTIME); +- if (ACPI_FAILURE(status)) +- ACPI_ERROR((AE_INFO, +- "Failed to enable GPE %02X\n", ++ ACPI_GPE_TYPE_RUNTIME); ++ if (ACPI_FAILURE(status)) { ++ 
ACPI_EXCEPTION((AE_INFO, status, ++ "Could not enable GPE 0x%02X", + gpe_number)); +- else +- gpe_enabled_count++; ++ continue; ++ } ++ ++ gpe_enabled_count++; + } + } + +diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c +index ca04823..cc82502 100644 +--- a/drivers/acpi/acpica/evxface.c ++++ b/drivers/acpi/acpica/evxface.c +@@ -682,14 +682,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device, + + /* Parameter validation */ + +- if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) { +- status = AE_BAD_PARAMETER; +- goto exit; ++ if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) { ++ return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); + if (ACPI_FAILURE(status)) { +- goto exit; ++ return_ACPI_STATUS(status); + } + + /* Ensure that we have a valid GPE number */ +@@ -720,6 +719,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device, + handler->context = context; + handler->method_node = gpe_event_info->dispatch.method_node; + ++ /* Disable the GPE before installing the handler */ ++ ++ status = acpi_ev_disable_gpe(gpe_event_info); ++ if (ACPI_FAILURE (status)) { ++ goto unlock_and_exit; ++ } ++ + /* Install the handler */ + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); +@@ -733,12 +739,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device, + + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + +- unlock_and_exit: ++unlock_and_exit: + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); +- exit: +- if (ACPI_FAILURE(status)) +- ACPI_EXCEPTION((AE_INFO, status, +- "Installing notify handler failed")); + return_ACPI_STATUS(status); + } + +diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c +index 5ff32c7..7c7bbb4 100644 +--- a/drivers/acpi/acpica/evxfevnt.c ++++ b/drivers/acpi/acpica/evxfevnt.c +@@ -203,21 +203,26 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event) + * + * FUNCTION: acpi_set_gpe + * +- * PARAMETERS: gpe_device - Parent GPE Device ++ * PARAMETERS: gpe_device - Parent GPE 
Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block +- * action - Enable or disable +- * Called from ISR or not ++ * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE + * + * RETURN: Status + * +- * DESCRIPTION: Enable or disable an ACPI event (general purpose) ++ * DESCRIPTION: Enable or disable an individual GPE. This function bypasses ++ * the reference count mechanism used in the acpi_enable_gpe and ++ * acpi_disable_gpe interfaces -- and should be used with care. ++ * ++ * Note: Typically used to disable a runtime GPE for short period of time, ++ * then re-enable it, without disturbing the existing reference counts. This ++ * is useful, for example, in the Embedded Controller (EC) driver. + * + ******************************************************************************/ + acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) + { +- acpi_status status = AE_OK; +- acpi_cpu_flags flags; + struct acpi_gpe_event_info *gpe_event_info; ++ acpi_status status; ++ acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_set_gpe); + +@@ -243,7 +248,6 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) + break; + + default: +- ACPI_ERROR((AE_INFO, "Invalid action\n")); + status = AE_BAD_PARAMETER; + break; + } +@@ -259,25 +263,31 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe) + * + * FUNCTION: acpi_enable_gpe + * +- * PARAMETERS: gpe_device - Parent GPE Device ++ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block +- * type - Purpose the GPE will be used for ++ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE ++ * or both + * + * RETURN: Status + * +- * DESCRIPTION: Take a reference to a GPE and enable it if necessary ++ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is ++ * hardware-enabled (for runtime GPEs), or the GPE register mask ++ * is updated (for wake GPEs). 
+ * + ******************************************************************************/ +-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) ++acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type) + { + acpi_status status = AE_OK; +- acpi_cpu_flags flags; + struct acpi_gpe_event_info *gpe_event_info; ++ acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_enable_gpe); + +- if (type & ~ACPI_GPE_TYPE_WAKE_RUN) ++ /* Parameter validation */ ++ ++ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) { + return_ACPI_STATUS(AE_BAD_PARAMETER); ++ } + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + +@@ -289,26 +299,43 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) + goto unlock_and_exit; + } + +- if (type & ACPI_GPE_TYPE_RUNTIME) { +- if (++gpe_event_info->runtime_count == 1) { ++ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) { ++ if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { ++ status = AE_LIMIT; /* Too many references */ ++ goto unlock_and_exit; ++ } ++ ++ gpe_event_info->runtime_count++; ++ if (gpe_event_info->runtime_count == 1) { + status = acpi_ev_enable_gpe(gpe_event_info); +- if (ACPI_FAILURE(status)) ++ if (ACPI_FAILURE(status)) { + gpe_event_info->runtime_count--; ++ goto unlock_and_exit; ++ } + } + } + +- if (type & ACPI_GPE_TYPE_WAKE) { ++ if (gpe_type & ACPI_GPE_TYPE_WAKE) { ++ /* The GPE must have the ability to wake the system */ ++ + if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { +- status = AE_BAD_PARAMETER; ++ status = AE_TYPE; ++ goto unlock_and_exit; ++ } ++ ++ if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) { ++ status = AE_LIMIT; /* Too many references */ + goto unlock_and_exit; + } + + /* +- * Wake-up GPEs are only enabled right prior to putting the +- * system into a sleep state. ++ * Update the enable mask on the first wakeup reference. Wake GPEs ++ * are only hardware-enabled just before sleeping. 
+ */ +- if (++gpe_event_info->wakeup_count == 1) +- acpi_ev_update_gpe_enable_masks(gpe_event_info); ++ gpe_event_info->wakeup_count++; ++ if (gpe_event_info->wakeup_count == 1) { ++ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); ++ } + } + + unlock_and_exit: +@@ -321,27 +348,34 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe) + * + * FUNCTION: acpi_disable_gpe + * +- * PARAMETERS: gpe_device - Parent GPE Device ++ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block +- * type - Purpose the GPE won't be used for any more ++ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE ++ * or both + * + * RETURN: Status + * +- * DESCRIPTION: Release a reference to a GPE and disable it if necessary ++ * DESCRIPTION: Remove a reference to a GPE. When the last reference is ++ * removed, only then is the GPE disabled (for runtime GPEs), or ++ * the GPE mask bit disabled (for wake GPEs) + * + ******************************************************************************/ +-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) ++acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type) + { + acpi_status status = AE_OK; +- acpi_cpu_flags flags; + struct acpi_gpe_event_info *gpe_event_info; ++ acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_disable_gpe); + +- if (type & ~ACPI_GPE_TYPE_WAKE_RUN) ++ /* Parameter validation */ ++ ++ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) { + return_ACPI_STATUS(AE_BAD_PARAMETER); ++ } + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); ++ + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); +@@ -350,18 +384,39 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) + goto unlock_and_exit; + } + +- if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) { +- if (--gpe_event_info->runtime_count == 0) ++ /* Hardware-disable 
a runtime GPE on removal of the last reference */ ++ ++ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) { ++ if (!gpe_event_info->runtime_count) { ++ status = AE_LIMIT; /* There are no references to remove */ ++ goto unlock_and_exit; ++ } ++ ++ gpe_event_info->runtime_count--; ++ if (!gpe_event_info->runtime_count) { + status = acpi_ev_disable_gpe(gpe_event_info); ++ if (ACPI_FAILURE(status)) { ++ gpe_event_info->runtime_count++; ++ goto unlock_and_exit; ++ } ++ } + } + +- if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) { +- /* +- * Wake-up GPEs are not enabled after leaving system sleep +- * states, so we don't need to disable them here. +- */ +- if (--gpe_event_info->wakeup_count == 0) +- acpi_ev_update_gpe_enable_masks(gpe_event_info); ++ /* ++ * Update masks for wake GPE on removal of the last reference. ++ * No need to hardware-disable wake GPEs here, they are not currently ++ * enabled. ++ */ ++ if (gpe_type & ACPI_GPE_TYPE_WAKE) { ++ if (!gpe_event_info->wakeup_count) { ++ status = AE_LIMIT; /* There are no references to remove */ ++ goto unlock_and_exit; ++ } ++ ++ gpe_event_info->wakeup_count--; ++ if (!gpe_event_info->wakeup_count) { ++ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); ++ } + } + + unlock_and_exit: +@@ -465,30 +520,23 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event) + * + * FUNCTION: acpi_clear_gpe + * +- * PARAMETERS: gpe_device - Parent GPE Device ++ * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block +- * Flags - Called from an ISR or not + * + * RETURN: Status + * + * DESCRIPTION: Clear an ACPI event (general purpose) + * + ******************************************************************************/ +-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags) ++acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) + { + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; ++ acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_clear_gpe); + +- /* Use semaphore lock if not executing at interrupt level */ +- +- if (flags & ACPI_NOT_ISR) { +- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); +- if (ACPI_FAILURE(status)) { +- return_ACPI_STATUS(status); +- } +- } ++ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* Ensure that we have a valid GPE number */ + +@@ -501,9 +549,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags) + status = acpi_hw_clear_gpe(gpe_event_info); + + unlock_and_exit: +- if (flags & ACPI_NOT_ISR) { +- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); +- } ++ acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); + } + +@@ -569,9 +615,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status) + * + * FUNCTION: acpi_get_gpe_status + * +- * PARAMETERS: gpe_device - Parent GPE Device ++ * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block +- * Flags - Called from an ISR or not + * event_status - Where the current status of the event will + * be returned + * +@@ -582,21 +627,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status) + ******************************************************************************/ + acpi_status + acpi_get_gpe_status(acpi_handle gpe_device, +- u32 gpe_number, u32 flags, acpi_event_status * event_status) ++ u32 gpe_number, acpi_event_status *event_status) + { + acpi_status status = AE_OK; + struct acpi_gpe_event_info *gpe_event_info; ++ acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_get_gpe_status); + +- /* Use semaphore lock if not executing at interrupt level */ +- +- if (flags & ACPI_NOT_ISR) { +- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); +- if (ACPI_FAILURE(status)) { +- return_ACPI_STATUS(status); +- } +- } ++ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* Ensure that we have a valid GPE number */ + +@@ -614,9 +653,7 @@ acpi_get_gpe_status(acpi_handle gpe_device, + *event_status |= ACPI_EVENT_FLAG_HANDLE; + + unlock_and_exit: +- if (flags & ACPI_NOT_ISR) { +- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); +- } ++ acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); + } + +@@ -673,20 +710,15 @@ acpi_install_gpe_block(acpi_handle gpe_device, + goto unlock_and_exit; + } + +- /* Run the _PRW methods and enable the GPEs */ +- +- status = acpi_ev_initialize_gpe_block(node, gpe_block); +- if (ACPI_FAILURE(status)) { +- goto unlock_and_exit; +- } +- +- /* Get the device_object attached to the node */ ++ /* Install block in the device_object attached to the node */ + + obj_desc = acpi_ns_get_attached_object(node); + if (!obj_desc) { + +- /* No object, create a new one */ +- ++ /* ++ * No object, create a new one (Device nodes do not always have ++ * an attached object) ++ */ + obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); + if (!obj_desc) { + status = 
AE_NO_MEMORY; +@@ -705,10 +737,14 @@ acpi_install_gpe_block(acpi_handle gpe_device, + } + } + +- /* Install the GPE block in the device_object */ ++ /* Now install the GPE block in the device_object */ + + obj_desc->device.gpe_block = gpe_block; + ++ /* Run the _PRW methods and enable the runtime GPEs in the new block */ ++ ++ status = acpi_ev_initialize_gpe_block(node, gpe_block); ++ + unlock_and_exit: + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +@@ -839,8 +875,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + + /* Increment Index by the number of GPEs in this block */ + +- info->next_block_base_index += +- (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH); ++ info->next_block_base_index += gpe_block->gpe_count; + + if (info->index < info->next_block_base_index) { + /* +diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c +index 3f3f48b..10e104c 100644 +--- a/drivers/acpi/acpica/exoparg2.c ++++ b/drivers/acpi/acpica/exoparg2.c +@@ -119,33 +119,6 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state) + status = AE_AML_OPERAND_TYPE; + break; + } +-#ifdef ACPI_GPE_NOTIFY_CHECK +- /* +- * GPE method wake/notify check. Here, we want to ensure that we +- * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx +- * GPE method during system runtime. If we do, the GPE is marked +- * as "wake-only" and disabled. +- * +- * 1) Is the Notify() value == device_wake? +- * 2) Is this a GPE deferred method? (An _Lxx or _Exx method) +- * 3) Did the original GPE happen at system runtime? +- * (versus during wake) +- * +- * If all three cases are true, this is a wake-only GPE that should +- * be disabled at runtime. 
+- */ +- if (value == 2) { /* device_wake */ +- status = +- acpi_ev_check_for_wake_only_gpe(walk_state-> +- gpe_event_info); +- if (ACPI_FAILURE(status)) { +- +- /* AE_WAKE_ONLY_GPE only error, means ignore this notify */ +- +- return_ACPI_STATUS(AE_OK) +- } +- } +-#endif + + /* + * Dispatch the notify to the appropriate handler +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 0338f51..7f2e051 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -765,7 +765,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) + } + + status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number, +- ACPI_NOT_ISR, &event_status); ++ &event_status); + if (status == AE_OK) + device->wakeup.flags.run_wake = + !!(event_status & ACPI_EVENT_FLAG_HANDLE); +diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c +index 4aaf249..e35525b 100644 +--- a/drivers/acpi/system.c ++++ b/drivers/acpi/system.c +@@ -303,8 +303,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle) + "Invalid GPE 0x%x\n", index)); + goto end; + } +- result = acpi_get_gpe_status(*handle, index, +- ACPI_NOT_ISR, status); ++ result = acpi_get_gpe_status(*handle, index, status); + } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS)) + result = acpi_get_event_status(index - num_gpes, status); + +@@ -395,7 +394,7 @@ static ssize_t counter_set(struct kobject *kobj, + result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE); + else if (!strcmp(buf, "clear\n") && + (status & ACPI_EVENT_FLAG_SET)) +- result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); ++ result = acpi_clear_gpe(handle, index); + else + all_counters[index].count = strtoul(buf, NULL, 0); + } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) { +diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h +index 5b2e5e8..5958d78 100644 +--- a/include/acpi/acexcep.h ++++ b/include/acpi/acexcep.h +@@ -87,7 +87,7 @@ + #define AE_NO_GLOBAL_LOCK (acpi_status) (0x0017 | AE_CODE_ENVIRONMENTAL) + 
#define AE_ABORT_METHOD (acpi_status) (0x0018 | AE_CODE_ENVIRONMENTAL) + #define AE_SAME_HANDLER (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL) +-#define AE_WAKE_ONLY_GPE (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL) ++#define AE_NO_HANDLER (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL) + #define AE_OWNER_ID_LIMIT (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL) + + #define AE_CODE_ENV_MAX 0x001B +diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h +index fd815f6..be16f97 100644 +--- a/include/acpi/acpixf.h ++++ b/include/acpi/acpixf.h +@@ -285,16 +285,17 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status); + */ + acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action); + +-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type); ++acpi_status ++acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type); + +-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type); ++acpi_status ++acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type); + +-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags); ++acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number); + + acpi_status + acpi_get_gpe_status(acpi_handle gpe_device, +- u32 gpe_number, +- u32 flags, acpi_event_status * event_status); ++ u32 gpe_number, acpi_event_status *event_status); + + acpi_status acpi_disable_all_gpes(void); + +diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h +index 3f08e64..de5e99a 100644 +--- a/include/acpi/actypes.h ++++ b/include/acpi/actypes.h +@@ -663,44 +663,42 @@ typedef u32 acpi_event_status; + #define ACPI_GPE_MAX 0xFF + #define ACPI_NUM_GPE 256 + ++/* Actions for acpi_set_gpe */ ++ + #define ACPI_GPE_ENABLE 0 + #define ACPI_GPE_DISABLE 1 + ++/* gpe_types for acpi_enable_gpe and acpi_disable_gpe */ ++ ++#define ACPI_GPE_TYPE_WAKE (u8) 0x01 ++#define ACPI_GPE_TYPE_RUNTIME (u8) 0x02 ++#define ACPI_GPE_TYPE_WAKE_RUN (u8) 
0x03 ++ + /* + * GPE info flags - Per GPE +- * +-+-+-+---+-+-+-+ +- * |7|6|5|4:3|2|1|0| +- * +-+-+-+---+-+-+-+ +- * | | | | | | | +- * | | | | | | +--- Interrupt type: Edge or Level Triggered +- * | | | | | +--- GPE can wake the system +- * | | | | +--- Unused +- * | | | +--- Type of dispatch -- to method, handler, or none +- * | | +--- Unused +- * | +--- Unused +- * +--- Unused ++ * +-------+---+-+-+ ++ * | 7:4 |3:2|1|0| ++ * +-------+---+-+-+ ++ * | | | | ++ * | | | +--- Interrupt type: edge or level triggered ++ * | | +----- GPE can wake the system ++ * | +-------- Type of dispatch:to method, handler, or none ++ * +-------------- + */ + #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 + #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 + #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 + +-#define ACPI_GPE_TYPE_MASK (u8) 0x06 +-#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06 +-#define ACPI_GPE_TYPE_WAKE (u8) 0x02 +-#define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */ + #define ACPI_GPE_CAN_WAKE (u8) 0x02 + +-#define ACPI_GPE_DISPATCH_MASK (u8) 0x18 +-#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08 +-#define ACPI_GPE_DISPATCH_METHOD (u8) 0x10 +-#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */ ++#define ACPI_GPE_DISPATCH_MASK (u8) 0x0C ++#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x04 ++#define ACPI_GPE_DISPATCH_METHOD (u8) 0x08 ++#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 + + /* + * Flags for GPE and Lock interfaces + */ +-#define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */ +-#define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */ +- + #define ACPI_NOT_ISR 0x1 + #define ACPI_ISR 0x0 + diff --git a/acpica-01-linux-2.6.git-a997ab332832519c2e292db13f509e4360495a5a.patch b/acpica-01-linux-2.6.git-a997ab332832519c2e292db13f509e4360495a5a.patch new file mode 100644 index 000000000..802f9ae0e --- /dev/null +++ b/acpica-01-linux-2.6.git-a997ab332832519c2e292db13f509e4360495a5a.patch @@ -0,0 +1,44 @@ +From: Rafael J. 
Wysocki +Date: Fri, 11 Jun 2010 22:05:19 +0000 (+0200) +Subject: ACPI / ACPICA: Do not attempt to disable GPE when installing handler +X-Git-Tag: v2.6.35-rc4~72^2~6^2~5 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=a997ab332832519c2e292db13f509e4360495a5a + +ACPI / ACPICA: Do not attempt to disable GPE when installing handler + +Commit 0f849d2cc6863c7874889ea60a871fb71399dd3f (ACPICA: Minimize +the differences between linux GPE code and ACPICA code base) +introduced a change attempting to disable a GPE before installing +a handler for it in acpi_install_gpe_handler() which was incorrect. +First, the GPE disabled by it is never enabled again (except during +resume) which leads to battery insert/remove events not being +reported on the Maxim Levitsky's machine. Second, the disabled +GPE is still reported as enabled by the sysfs interface that only +checks its enable register's enable_for_run mask. + +Revert this change for now, because it causes more damage to happen +than the bug it was supposed to fix. + +Signed-off-by: Rafael J. 
Wysocki +Reported-and-tested-by: Maxim Levitsky +Signed-off-by: Len Brown +--- + +diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c +index cc82502..4a531cd 100644 +--- a/drivers/acpi/acpica/evxface.c ++++ b/drivers/acpi/acpica/evxface.c +@@ -719,13 +719,6 @@ acpi_install_gpe_handler(acpi_handle gpe_device, + handler->context = context; + handler->method_node = gpe_event_info->dispatch.method_node; + +- /* Disable the GPE before installing the handler */ +- +- status = acpi_ev_disable_gpe(gpe_event_info); +- if (ACPI_FAILURE (status)) { +- goto unlock_and_exit; +- } +- + /* Install the handler */ + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); diff --git a/acpica-02-linux-2.6.git-e4e9a735991c80fb0fc1bd4a13a93681c3c17ce0.patch b/acpica-02-linux-2.6.git-e4e9a735991c80fb0fc1bd4a13a93681c3c17ce0.patch new file mode 100644 index 000000000..3c8fa0c86 --- /dev/null +++ b/acpica-02-linux-2.6.git-e4e9a735991c80fb0fc1bd4a13a93681c3c17ce0.patch @@ -0,0 +1,165 @@ +From: Rafael J. Wysocki +Date: Tue, 8 Jun 2010 08:48:26 +0000 (+0200) +Subject: ACPI / ACPICA: Use helper function for computing GPE masks +X-Git-Tag: v2.6.35-rc4~72^2~6^2~4 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=e4e9a735991c80fb0fc1bd4a13a93681c3c17ce0 + +ACPI / ACPICA: Use helper function for computing GPE masks + +In quite a few places ACPICA needs to compute a GPE enable mask with +only one bit, corresponding to a given GPE, set. Currently, that +computation is always open coded which leads to unnecessary code +duplication. Fix this by introducing a helper function for computing +one-bit GPE enable masks and using it where appropriate. + +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Len Brown +--- + +diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h +index 5900f13..c46277d 100644 +--- a/drivers/acpi/acpica/achware.h ++++ b/drivers/acpi/acpica/achware.h +@@ -90,6 +90,9 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width); + /* + * hwgpe - GPE support + */ ++u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, ++ struct acpi_gpe_register_info *gpe_register_info); ++ + acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); + + acpi_status +diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c +index deb26f4..57eeb3b 100644 +--- a/drivers/acpi/acpica/evgpe.c ++++ b/drivers/acpi/acpica/evgpe.c +@@ -69,7 +69,7 @@ acpi_status + acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info) + { + struct acpi_gpe_register_info *gpe_register_info; +- u8 register_bit; ++ u32 register_bit; + + ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks); + +@@ -78,9 +78,8 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info) + return_ACPI_STATUS(AE_NOT_EXIST); + } + +- register_bit = (u8) +- (1 << +- (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); ++ register_bit = acpi_hw_gpe_register_bit(gpe_event_info, ++ gpe_register_info); + + /* Clear the wake/run bits up front */ + +diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c +index bd72319..d989b8e 100644 +--- a/drivers/acpi/acpica/hwgpe.c ++++ b/drivers/acpi/acpica/hwgpe.c +@@ -57,6 +57,27 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, + + /****************************************************************************** + * ++ * FUNCTION: acpi_hw_gpe_register_bit ++ * ++ * PARAMETERS: gpe_event_info - Info block for the GPE ++ * gpe_register_info - Info block for the GPE register ++ * ++ * RETURN: Status ++ * ++ * DESCRIPTION: Compute GPE enable mask with one bit 
corresponding to the given ++ * GPE set. ++ * ++ ******************************************************************************/ ++ ++u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, ++ struct acpi_gpe_register_info *gpe_register_info) ++{ ++ return (u32)1 << (gpe_event_info->gpe_number - ++ gpe_register_info->base_gpe_number); ++} ++ ++/****************************************************************************** ++ * + * FUNCTION: acpi_hw_low_disable_gpe + * + * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled +@@ -72,6 +93,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + u32 enable_mask; ++ u32 register_bit; + + /* Get the info block for the entire GPE register */ + +@@ -89,9 +111,9 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) + + /* Clear just the bit that corresponds to this GPE */ + +- ACPI_CLEAR_BIT(enable_mask, ((u32)1 << +- (gpe_event_info->gpe_number - +- gpe_register_info->base_gpe_number))); ++ register_bit = acpi_hw_gpe_register_bit(gpe_event_info, ++ gpe_register_info); ++ ACPI_CLEAR_BIT(enable_mask, register_bit); + + /* Write the updated enable mask */ + +@@ -150,21 +172,28 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info) + + acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) + { ++ struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; +- u8 register_bit; ++ u32 register_bit; + + ACPI_FUNCTION_ENTRY(); + +- register_bit = (u8)(1 << +- (gpe_event_info->gpe_number - +- gpe_event_info->register_info->base_gpe_number)); ++ /* Get the info block for the entire GPE register */ ++ ++ gpe_register_info = gpe_event_info->register_info; ++ if (!gpe_register_info) { ++ return (AE_NOT_EXIST); ++ } ++ ++ register_bit = acpi_hw_gpe_register_bit(gpe_event_info, ++ gpe_register_info); + + /* + * Write a one 
to the appropriate bit in the status register to + * clear this GPE. + */ + status = acpi_hw_write(register_bit, +- &gpe_event_info->register_info->status_address); ++ &gpe_register_info->status_address); + + return (status); + } +@@ -187,7 +216,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, + acpi_event_status * event_status) + { + u32 in_byte; +- u8 register_bit; ++ u32 register_bit; + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + acpi_event_status local_event_status = 0; +@@ -204,9 +233,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, + + /* Get the register bitmask for this GPE */ + +- register_bit = (u8)(1 << +- (gpe_event_info->gpe_number - +- gpe_event_info->register_info->base_gpe_number)); ++ register_bit = acpi_hw_gpe_register_bit(gpe_event_info, ++ gpe_register_info); + + /* GPE currently enabled? (enabled for runtime?) */ + diff --git a/acpica-03-linux-2.6.git-fd247447c1d94a79d5cfc647430784306b3a8323.patch b/acpica-03-linux-2.6.git-fd247447c1d94a79d5cfc647430784306b3a8323.patch new file mode 100644 index 000000000..7cba48831 --- /dev/null +++ b/acpica-03-linux-2.6.git-fd247447c1d94a79d5cfc647430784306b3a8323.patch @@ -0,0 +1,381 @@ +From: Rafael J. Wysocki +Date: Tue, 8 Jun 2010 08:49:08 +0000 (+0200) +Subject: ACPI / ACPICA: Fix low-level GPE manipulation code +X-Git-Tag: v2.6.35-rc4~72^2~6^2~3 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=fd247447c1d94a79d5cfc647430784306b3a8323 + +ACPI / ACPICA: Fix low-level GPE manipulation code + +ACPICA uses acpi_ev_enable_gpe() for enabling GPEs at the low level, +which is incorrect, because this function only enables the GPE if the +corresponding bit in its enable register's enable_for_run mask is set. +This causes acpi_set_gpe() to work incorrectly if used for enabling +GPEs that were not previously enabled with acpi_enable_gpe(). 
As a +result, among other things, wakeup-only GPEs are never enabled by +acpi_enable_wakeup_device(), so the devices that use them are unable +to wake up the system. + +To fix this issue remove acpi_ev_enable_gpe() and its counterpart +acpi_ev_disable_gpe() and replace acpi_hw_low_disable_gpe() with +acpi_hw_low_set_gpe() that will be used instead to manipulate GPE +enable bits at the low level. Make the users of acpi_ev_enable_gpe() +and acpi_ev_disable_gpe() call acpi_hw_low_set_gpe() instead and +make sure that GPE enable masks are only updated by acpi_enable_gpe() +and acpi_disable_gpe() when GPE reference counters change from 0 +to 1 and from 1 to 0, respectively. + +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Len Brown +--- + +diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h +index 5e094a2..138bbb5 100644 +--- a/drivers/acpi/acpica/acevents.h ++++ b/drivers/acpi/acpica/acevents.h +@@ -78,10 +78,6 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node, + acpi_status + acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info); + +-acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); +- +-acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); +- + struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, + u32 gpe_number); + +diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h +index c46277d..3239158 100644 +--- a/drivers/acpi/acpica/achware.h ++++ b/drivers/acpi/acpica/achware.h +@@ -93,7 +93,8 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width); + u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, + struct acpi_gpe_register_info *gpe_register_info); + +-acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); ++acpi_status ++acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action); + + acpi_status + 
acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info); +diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c +index 57eeb3b..66cd038 100644 +--- a/drivers/acpi/acpica/evgpe.c ++++ b/drivers/acpi/acpica/evgpe.c +@@ -99,106 +99,6 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info) + return_ACPI_STATUS(AE_OK); + } + +-/******************************************************************************* +- * +- * FUNCTION: acpi_ev_enable_gpe +- * +- * PARAMETERS: gpe_event_info - GPE to enable +- * +- * RETURN: Status +- * +- * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless +- * of type or number of references. +- * +- * Note: The GPE lock should be already acquired when this function is called. +- * +- ******************************************************************************/ +- +-acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) +-{ +- acpi_status status; +- +- +- ACPI_FUNCTION_TRACE(ev_enable_gpe); +- +- +- /* +- * We will only allow a GPE to be enabled if it has either an +- * associated method (_Lxx/_Exx) or a handler. Otherwise, the +- * GPE will be immediately disabled by acpi_ev_gpe_dispatch the +- * first time it fires. 
+- */ +- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { +- return_ACPI_STATUS(AE_NO_HANDLER); +- } +- +- /* Ensure the HW enable masks are current */ +- +- status = acpi_ev_update_gpe_enable_masks(gpe_event_info); +- if (ACPI_FAILURE(status)) { +- return_ACPI_STATUS(status); +- } +- +- /* Clear the GPE (of stale events) */ +- +- status = acpi_hw_clear_gpe(gpe_event_info); +- if (ACPI_FAILURE(status)) { +- return_ACPI_STATUS(status); +- } +- +- /* Enable the requested GPE */ +- +- status = acpi_hw_write_gpe_enable_reg(gpe_event_info); +- return_ACPI_STATUS(status); +-} +- +-/******************************************************************************* +- * +- * FUNCTION: acpi_ev_disable_gpe +- * +- * PARAMETERS: gpe_event_info - GPE to disable +- * +- * RETURN: Status +- * +- * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE, +- * regardless of the type or number of references. +- * +- * Note: The GPE lock should be already acquired when this function is called. +- * +- ******************************************************************************/ +- +-acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) +-{ +- acpi_status status; +- +- ACPI_FUNCTION_TRACE(ev_disable_gpe); +- +- +- /* +- * Note: Always disable the GPE, even if we think that that it is already +- * disabled. It is possible that the AML or some other code has enabled +- * the GPE behind our back. +- */ +- +- /* Ensure the HW enable masks are current */ +- +- status = acpi_ev_update_gpe_enable_masks(gpe_event_info); +- if (ACPI_FAILURE(status)) { +- return_ACPI_STATUS(status); +- } +- +- /* +- * Always H/W disable this GPE, even if we don't know the GPE type. +- * Simply clear the enable bit for this particular GPE, but do not +- * write out the current GPE enable mask since this may inadvertently +- * enable GPEs too early. 
An example is a rogue GPE that has arrived +- * during ACPICA initialization - possibly because AML or other code +- * has enabled the GPE. +- */ +- status = acpi_hw_low_disable_gpe(gpe_event_info); +- return_ACPI_STATUS(status); +-} +- + + /******************************************************************************* + * +@@ -450,10 +350,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) + return_VOID; + } + +- /* Update the GPE register masks for return to enabled state */ +- +- (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); +- + /* + * Take a snapshot of the GPE info for this level - we copy the info to + * prevent a race condition with remove_handler/remove_block. +@@ -606,7 +502,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) + * Disable the GPE, so it doesn't keep firing before the method has a + * chance to run (it runs asynchronously with interrupts enabled). + */ +- status = acpi_ev_disable_gpe(gpe_event_info); ++ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "Unable to disable GPE[0x%2X]", +@@ -643,7 +539,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) + * Disable the GPE. The GPE will remain disabled a handler + * is installed or ACPICA is restarted. 
+ */ +- status = acpi_ev_disable_gpe(gpe_event_info); ++ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "Unable to disable GPE[0x%2X]", +diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c +index 7c7bbb4..e3d9f5c 100644 +--- a/drivers/acpi/acpica/evxfevnt.c ++++ b/drivers/acpi/acpica/evxfevnt.c +@@ -201,6 +201,44 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event) + + /******************************************************************************* + * ++ * FUNCTION: acpi_clear_and_enable_gpe ++ * ++ * PARAMETERS: gpe_event_info - GPE to enable ++ * ++ * RETURN: Status ++ * ++ * DESCRIPTION: Clear the given GPE from stale events and enable it. ++ * ++ ******************************************************************************/ ++static acpi_status ++acpi_clear_and_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) ++{ ++ acpi_status status; ++ ++ /* ++ * We will only allow a GPE to be enabled if it has either an ++ * associated method (_Lxx/_Exx) or a handler. Otherwise, the ++ * GPE will be immediately disabled by acpi_ev_gpe_dispatch the ++ * first time it fires. ++ */ ++ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { ++ return_ACPI_STATUS(AE_NO_HANDLER); ++ } ++ ++ /* Clear the GPE (of stale events) */ ++ status = acpi_hw_clear_gpe(gpe_event_info); ++ if (ACPI_FAILURE(status)) { ++ return_ACPI_STATUS(status); ++ } ++ ++ /* Enable the requested GPE */ ++ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); ++ ++ return_ACPI_STATUS(status); ++} ++ ++/******************************************************************************* ++ * + * FUNCTION: acpi_set_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 +@@ -240,11 +278,11 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) + + switch (action) { + case ACPI_GPE_ENABLE: +- status = acpi_ev_enable_gpe(gpe_event_info); ++ status = acpi_clear_and_enable_gpe(gpe_event_info); + break; + + case ACPI_GPE_DISABLE: +- status = acpi_ev_disable_gpe(gpe_event_info); ++ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); + break; + + default: +@@ -307,7 +345,11 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type) + + gpe_event_info->runtime_count++; + if (gpe_event_info->runtime_count == 1) { +- status = acpi_ev_enable_gpe(gpe_event_info); ++ status = acpi_ev_update_gpe_enable_masks(gpe_event_info); ++ if (ACPI_SUCCESS(status)) { ++ status = acpi_clear_and_enable_gpe(gpe_event_info); ++ } ++ + if (ACPI_FAILURE(status)) { + gpe_event_info->runtime_count--; + goto unlock_and_exit; +@@ -334,7 +376,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type) + */ + gpe_event_info->wakeup_count++; + if (gpe_event_info->wakeup_count == 1) { +- (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); ++ status = acpi_ev_update_gpe_enable_masks(gpe_event_info); + } + } + +@@ -394,7 +436,12 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type + + gpe_event_info->runtime_count--; + if (!gpe_event_info->runtime_count) { +- status = acpi_ev_disable_gpe(gpe_event_info); ++ status = acpi_ev_update_gpe_enable_masks(gpe_event_info); ++ if (ACPI_SUCCESS(status)) { ++ status = acpi_hw_low_set_gpe(gpe_event_info, ++ ACPI_GPE_DISABLE); ++ } ++ + if (ACPI_FAILURE(status)) { + gpe_event_info->runtime_count++; + goto unlock_and_exit; +@@ -415,7 +462,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type + + gpe_event_info->wakeup_count--; + if (!gpe_event_info->wakeup_count) { +- (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); ++ status = 
acpi_ev_update_gpe_enable_masks(gpe_event_info); + } + } + +diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c +index d989b8e..40388e2 100644 +--- a/drivers/acpi/acpica/hwgpe.c ++++ b/drivers/acpi/acpica/hwgpe.c +@@ -78,23 +78,27 @@ u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, + + /****************************************************************************** + * +- * FUNCTION: acpi_hw_low_disable_gpe ++ * FUNCTION: acpi_hw_low_set_gpe + * + * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled ++ * action - Enable or disable + * + * RETURN: Status + * +- * DESCRIPTION: Disable a single GPE in the enable register. ++ * DESCRIPTION: Enable or disable a single GPE in its enable register. + * + ******************************************************************************/ + +-acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) ++acpi_status ++acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) + { + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + u32 enable_mask; + u32 register_bit; + ++ ACPI_FUNCTION_ENTRY(); ++ + /* Get the info block for the entire GPE register */ + + gpe_register_info = gpe_event_info->register_info; +@@ -109,11 +113,23 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) + return (status); + } + +- /* Clear just the bit that corresponds to this GPE */ ++ /* Set ot clear just the bit that corresponds to this GPE */ + + register_bit = acpi_hw_gpe_register_bit(gpe_event_info, + gpe_register_info); +- ACPI_CLEAR_BIT(enable_mask, register_bit); ++ switch (action) { ++ case ACPI_GPE_ENABLE: ++ ACPI_SET_BIT(enable_mask, register_bit); ++ break; ++ ++ case ACPI_GPE_DISABLE: ++ ACPI_CLEAR_BIT(enable_mask, register_bit); ++ break; ++ ++ default: ++ ACPI_ERROR((AE_INFO, "Invalid action\n")); ++ return (AE_BAD_PARAMETER); ++ } + + /* Write the updated enable mask */ + +diff --git 
a/include/acpi/actypes.h b/include/acpi/actypes.h +index de5e99a..6881f5b 100644 +--- a/include/acpi/actypes.h ++++ b/include/acpi/actypes.h +@@ -663,7 +663,7 @@ typedef u32 acpi_event_status; + #define ACPI_GPE_MAX 0xFF + #define ACPI_NUM_GPE 256 + +-/* Actions for acpi_set_gpe */ ++/* Actions for acpi_set_gpe and acpi_hw_low_set_gpe */ + + #define ACPI_GPE_ENABLE 0 + #define ACPI_GPE_DISABLE 1 diff --git a/acpica-04-linux-2.6.git-c9a8bbb7704cbf515c0fc68970abbe4e91d68521.patch b/acpica-04-linux-2.6.git-c9a8bbb7704cbf515c0fc68970abbe4e91d68521.patch new file mode 100644 index 000000000..025eff3dd --- /dev/null +++ b/acpica-04-linux-2.6.git-c9a8bbb7704cbf515c0fc68970abbe4e91d68521.patch @@ -0,0 +1,77 @@ +From: Rafael J. Wysocki +Date: Tue, 8 Jun 2010 08:49:45 +0000 (+0200) +Subject: ACPI / ACPICA: Avoid writing full enable masks to GPE registers +X-Git-Tag: v2.6.35-rc4~72^2~6^2~2 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=c9a8bbb7704cbf515c0fc68970abbe4e91d68521 + +ACPI / ACPICA: Avoid writing full enable masks to GPE registers + +ACPICA uses acpi_hw_write_gpe_enable_reg() to re-enable a GPE after +an event signaled by it has been handled. However, this function +writes the entire GPE enable mask to the GPE's enable register which +may not be correct. Namely, if one of the other GPEs in the same +register was previously enabled by acpi_enable_gpe() and subsequently +disabled using acpi_set_gpe(), acpi_hw_write_gpe_enable_reg() will +re-enable it along with the target GPE. + +To fix this issue rework acpi_hw_write_gpe_enable_reg() so that it +calls acpi_hw_low_set_gpe() with a special action value, +ACPI_GPE_COND_ENABLE, that will make it only enable the GPE if the +corresponding bit in its register's enable_for_run mask is set. + +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Len Brown +--- + +diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c +index 40388e2..3450309 100644 +--- a/drivers/acpi/acpica/hwgpe.c ++++ b/drivers/acpi/acpica/hwgpe.c +@@ -118,6 +118,10 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) + register_bit = acpi_hw_gpe_register_bit(gpe_event_info, + gpe_register_info); + switch (action) { ++ case ACPI_GPE_COND_ENABLE: ++ if (!(register_bit & gpe_register_info->enable_for_run)) ++ return (AE_BAD_PARAMETER); ++ + case ACPI_GPE_ENABLE: + ACPI_SET_BIT(enable_mask, register_bit); + break; +@@ -154,23 +158,11 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) + acpi_status + acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info) + { +- struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + + ACPI_FUNCTION_ENTRY(); + +- /* Get the info block for the entire GPE register */ +- +- gpe_register_info = gpe_event_info->register_info; +- if (!gpe_register_info) { +- return (AE_NOT_EXIST); +- } +- +- /* Write the entire GPE (runtime) enable register */ +- +- status = acpi_hw_write(gpe_register_info->enable_for_run, +- &gpe_register_info->enable_address); +- ++ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE); + return (status); + } + +diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h +index 6881f5b..15a4c68f 100644 +--- a/include/acpi/actypes.h ++++ b/include/acpi/actypes.h +@@ -667,6 +667,7 @@ typedef u32 acpi_event_status; + + #define ACPI_GPE_ENABLE 0 + #define ACPI_GPE_DISABLE 1 ++#define ACPI_GPE_COND_ENABLE 2 + + /* gpe_types for acpi_enable_gpe and acpi_disable_gpe */ + diff --git a/acpica-05-linux-2.6.git-ce43ace02320a3fb9614ddb27edc3a8700d68b26.patch b/acpica-05-linux-2.6.git-ce43ace02320a3fb9614ddb27edc3a8700d68b26.patch new file mode 100644 index 000000000..caaed26ba --- /dev/null +++ b/acpica-05-linux-2.6.git-ce43ace02320a3fb9614ddb27edc3a8700d68b26.patch 
@@ -0,0 +1,53 @@ +From: Rafael J. Wysocki +Date: Tue, 8 Jun 2010 08:50:20 +0000 (+0200) +Subject: ACPI / ACPICA: Fix GPE initialization +X-Git-Tag: v2.6.35-rc4~72^2~6^2~1 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=ce43ace02320a3fb9614ddb27edc3a8700d68b26 + +ACPI / ACPICA: Fix GPE initialization + +While developing the GPE reference counting code we overlooked the +fact that acpi_ev_update_gpes() could have enabled GPEs before +acpi_ev_initialize_gpe_block() was called. As a result, some GPEs +are enabled twice during the initialization. + +To fix this issue avoid calling acpi_enable_gpe() from +acpi_ev_initialize_gpe_block() for the GPEs that have nonzero +runtime reference counters. + +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Len Brown +--- + +diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c +index 85ded1f..79048de 100644 +--- a/drivers/acpi/acpica/evgpeblk.c ++++ b/drivers/acpi/acpica/evgpeblk.c +@@ -1024,6 +1024,19 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, + + gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; + gpe_event_info = &gpe_block->event_info[gpe_index]; ++ gpe_number = gpe_index + gpe_block->block_base_number; ++ ++ /* ++ * If the GPE has already been enabled for runtime ++ * signaling, make sure it remains enabled, but do not ++ * increment its reference counter. 
++ */ ++ if (gpe_event_info->runtime_count) { ++ acpi_set_gpe(gpe_device, gpe_number, ++ ACPI_GPE_ENABLE); ++ gpe_enabled_count++; ++ continue; ++ } + + if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) { + wake_gpe_count++; +@@ -1040,7 +1053,6 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, + + /* Enable this GPE */ + +- gpe_number = gpe_index + gpe_block->block_base_number; + status = acpi_enable_gpe(gpe_device, gpe_number, + ACPI_GPE_TYPE_RUNTIME); + if (ACPI_FAILURE(status)) { diff --git a/acpica-06-linux-2.6.git-9d3c752de65dbfa6e522f1d666deb0ac152ef367.patch b/acpica-06-linux-2.6.git-9d3c752de65dbfa6e522f1d666deb0ac152ef367.patch new file mode 100644 index 000000000..3351666df --- /dev/null +++ b/acpica-06-linux-2.6.git-9d3c752de65dbfa6e522f1d666deb0ac152ef367.patch @@ -0,0 +1,43 @@ +From: Rafael J. Wysocki +Date: Tue, 8 Jun 2010 08:50:53 +0000 (+0200) +Subject: ACPI / ACPICA: Fix sysfs GPE interface +X-Git-Tag: v2.6.35-rc4~72^2~6^2 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=9d3c752de65dbfa6e522f1d666deb0ac152ef367 + +ACPI / ACPICA: Fix sysfs GPE interface + +The sysfs interface allowing user space to disable/enable GPEs +doesn't work correctly, because a GPE disabled this way will be +re-enabled shortly by acpi_ev_asynch_enable_gpe() if it was +previosuly enabled by acpi_enable_gpe() (in which case the +corresponding bit in its enable register's enable_for_run mask is +set). + +To address this issue make the sysfs GPE interface use +acpi_enable_gpe() and acpi_disable_gpe() instead of acpi_set_gpe() +so that GPE reference counters are modified by it along with the +values of GPE enable registers. + +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Len Brown +--- + +diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c +index e35525b..904e8fc 100644 +--- a/drivers/acpi/system.c ++++ b/drivers/acpi/system.c +@@ -388,10 +388,12 @@ static ssize_t counter_set(struct kobject *kobj, + if (index < num_gpes) { + if (!strcmp(buf, "disable\n") && + (status & ACPI_EVENT_FLAG_ENABLED)) +- result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE); ++ result = acpi_disable_gpe(handle, index, ++ ACPI_GPE_TYPE_RUNTIME); + else if (!strcmp(buf, "enable\n") && + !(status & ACPI_EVENT_FLAG_ENABLED)) +- result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE); ++ result = acpi_enable_gpe(handle, index, ++ ACPI_GPE_TYPE_RUNTIME); + else if (!strcmp(buf, "clear\n") && + (status & ACPI_EVENT_FLAG_SET)) + result = acpi_clear_gpe(handle, index); diff --git a/add-appleir-usb-driver.patch b/add-appleir-usb-driver.patch new file mode 100644 index 000000000..e8073dea9 --- /dev/null +++ b/add-appleir-usb-driver.patch @@ -0,0 +1,666 @@ +From e2e8fc4ed31157e9e9e9cbc70febf08c77233aea Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Thu, 20 May 2010 10:17:58 -0400 +Subject: add-appleir-driver + +--- + Documentation/input/appleir.txt | 45 ++++ + drivers/hid/hid-apple.c | 4 - + drivers/hid/hid-core.c | 9 +- + drivers/hid/hid-ids.h | 1 + + drivers/hid/usbhid/hid-core.c | 1 + + drivers/input/misc/Kconfig | 13 ++ + drivers/input/misc/Makefile | 1 + + drivers/input/misc/appleir.c | 453 +++++++++++++++++++++++++++++++++++++++ + include/linux/hid.h | 2 + + 9 files changed, 522 insertions(+), 7 deletions(-) + create mode 100644 Documentation/input/appleir.txt + create mode 100644 drivers/input/misc/appleir.c + +diff --git a/Documentation/input/appleir.txt b/Documentation/input/appleir.txt +new file mode 100644 +index 0000000..0267a4b +--- /dev/null ++++ b/Documentation/input/appleir.txt +@@ -0,0 +1,45 @@ ++Apple IR receiver Driver (appleir) ++---------------------------------- ++ Copyright (C) 2009 Bastien Nocera ++ ++The 
appleir driver is a kernel input driver to handle Apple's IR ++receivers (and associated remotes) in the kernel. ++ ++The driver is an input driver which only handles "official" remotes ++as built and sold by Apple. ++ ++Authors ++------- ++ ++James McKenzie (original driver) ++Alex Karpenko (05ac:8242 support) ++Greg Kroah-Hartman (cleanups and original submission) ++Bastien Nocera (further cleanups and suspend support) ++ ++Supported hardware ++------------------ ++ ++- All Apple laptops and desktops from 2005 onwards, except: ++ - the unibody Macbook (2009) ++ - Mac Pro (all versions) ++- Apple TV (all revisions) ++ ++The remote will only support the 6 buttons of the original remotes ++as sold by Apple. See the next section if you want to use other remotes ++or want to use lirc with the device instead of the kernel driver. ++ ++Using lirc (native) instead of the kernel driver ++------------------------------------------------ ++ ++First, you will need to disable the kernel driver for the receiver. ++ ++This can be achieved by passing quirks to the usbhid driver. ++The quirk line would be: ++usbhid.quirks=0x05ac:0x8242:0x40000010 ++ ++With 0x05ac being the vendor ID (Apple, you shouldn't need to change this) ++With 0x8242 being the product ID (check the output of lsusb for your hardware) ++And 0x10 being "HID_QUIRK_HIDDEV_FORCE" and 0x40000000 being "HID_QUIRK_NO_IGNORE" ++ ++This should force the creation of a hiddev device for the receiver, and ++make it usable under lirc. 
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index bba05d0..0059d5a 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -361,10 +361,6 @@ static void apple_remove(struct hid_device *hdev) + } + + static const struct hid_device_id apple_devices[] = { +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL), +- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT }, +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4), +- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE), + .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL }, + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 143e788..387bb59 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1167,6 +1167,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) + unsigned int i; + int len; + ++ if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) ++ connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); + if (hdev->bus != BUS_USB) + connect_mask &= ~HID_CONNECT_HIDDEV; + if (hid_hiddev(hdev)) +@@ -1248,8 +1250,6 @@ static const struct hid_device_id hid_blacklist[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, +@@ -1553,6 +1553,9 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)}, + 
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, + { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, +@@ -1757,7 +1760,7 @@ int hid_add_device(struct hid_device *hdev) + + /* we need to kill them here, otherwise they will stay allocated to + * wait for coming driver */ +- if (hid_ignore(hdev)) ++ if (!(hdev->quirks & HID_QUIRK_NO_IGNORE) && hid_ignore(hdev)) + return -ENODEV; + + /* XXX hack, any other cleaner solution after the driver core +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 09d2764..7275a9d 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -97,6 +97,7 @@ + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b + #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a + #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b ++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 + #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 + #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 + +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c +index 7b85b69..66b512c 100644 +--- a/drivers/hid/usbhid/hid-core.c ++++ b/drivers/hid/usbhid/hid-core.c +@@ -1133,6 +1133,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * + hid->vendor = le16_to_cpu(dev->descriptor.idVendor); + hid->product = le16_to_cpu(dev->descriptor.idProduct); + hid->name[0] = 0; ++ hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product); + if (intf->cur_altsetting->desc.bInterfaceProtocol == + USB_INTERFACE_PROTOCOL_MOUSE) + hid->type = 
HID_TYPE_USBMOUSE; +diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig +index 23140a3..46614b2 100644 +--- a/drivers/input/misc/Kconfig ++++ b/drivers/input/misc/Kconfig +@@ -159,6 +159,19 @@ config INPUT_KEYSPAN_REMOTE + To compile this driver as a module, choose M here: the module will + be called keyspan_remote. + ++config INPUT_APPLEIR ++ tristate "Apple infrared receiver (built in)" ++ depends on USB_ARCH_HAS_HCD ++ select USB ++ help ++ Say Y here if you want to use a Apple infrared remote control. All ++ the Apple computers from 2005 onwards include such a port, except ++ the unibody Macbook (2009), and Mac Pros. This receiver is also ++ used in the Apple TV set-top box. ++ ++ To compile this driver as a module, choose M here: the module will ++ be called appleir. ++ + config INPUT_POWERMATE + tristate "Griffin PowerMate and Contour Jog support" + depends on USB_ARCH_HAS_HCD +diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile +index 7e95a5d..3fa4404 100644 +--- a/drivers/input/misc/Makefile ++++ b/drivers/input/misc/Makefile +@@ -6,6 +6,7 @@ + + obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o + obj-$(CONFIG_INPUT_APANEL) += apanel.o ++obj-$(CONFIG_INPUT_APPLEIR) += appleir.o + obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o + obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o + obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o +diff --git a/drivers/input/misc/appleir.c b/drivers/input/misc/appleir.c +new file mode 100644 +index 0000000..cff4df6 +--- /dev/null ++++ b/drivers/input/misc/appleir.c +@@ -0,0 +1,453 @@ ++/* ++ * appleir: USB driver for the apple ir device ++ * ++ * Original driver written by James McKenzie ++ * Ported to recent 2.6 kernel versions by Greg Kroah-Hartman ++ * ++ * Copyright (C) 2006 James McKenzie ++ * Copyright (C) 2008 Greg Kroah-Hartman ++ * Copyright (C) 2008 Novell Inc. 
++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation, version 2. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRIVER_VERSION "v1.2" ++#define DRIVER_AUTHOR "James McKenzie" ++#define DRIVER_DESC "Apple infrared receiver driver" ++#define DRIVER_LICENSE "GPL" ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE(DRIVER_LICENSE); ++ ++#define USB_VENDOR_ID_APPLE 0x05ac ++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 ++#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 ++#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 ++ ++#define URB_SIZE 32 ++ ++#define MAX_KEYS 8 ++#define MAX_KEYS_MASK (MAX_KEYS - 1) ++ ++#define dbginfo(dev, format, arg...) do { if (debug) dev_info(dev , format , ## arg); } while (0) ++ ++static int debug; ++module_param(debug, int, 0644); ++MODULE_PARM_DESC(debug, "Enable extra debug messages and information"); ++ ++/* I have two devices both of which report the following */ ++/* 25 87 ee 83 0a + */ ++/* 25 87 ee 83 0c - */ ++/* 25 87 ee 83 09 << */ ++/* 25 87 ee 83 06 >> */ ++/* 25 87 ee 83 05 >" */ ++/* 25 87 ee 83 03 menu */ ++/* 26 00 00 00 00 for key repeat*/ ++ ++/* Thomas Glanzmann reports the following responses */ ++/* 25 87 ee ca 0b + */ ++/* 25 87 ee ca 0d - */ ++/* 25 87 ee ca 08 << */ ++/* 25 87 ee ca 07 >> */ ++/* 25 87 ee ca 04 >" */ ++/* 25 87 ee ca 02 menu */ ++/* 26 00 00 00 00 for key repeat*/ ++/* He also observes the following event sometimes */ ++/* sent after a key is release, which I interpret */ ++/* as a flat battery message */ ++/* 25 87 e0 ca 06 flat battery */ ++ ++/* Alexandre Karpenko reports the following responses for Device ID 0x8242 */ ++/* 25 87 ee 47 0b + */ ++/* 25 87 ee 47 0d - */ ++/* 25 87 ee 47 08 << */ ++/* 25 87 ee 47 07 >> */ ++/* 25 87 ee 47 04 >" */ 
++/* 25 87 ee 47 02 menu */ ++/* 26 87 ee 47 ** for key repeat (** is the code of the key being held) */ ++ ++static const unsigned short appleir_key_table[] = { ++ KEY_RESERVED, ++ KEY_MENU, ++ KEY_PLAYPAUSE, ++ KEY_FORWARD, ++ KEY_BACK, ++ KEY_VOLUMEUP, ++ KEY_VOLUMEDOWN, ++ KEY_RESERVED, ++}; ++ ++struct appleir { ++ struct input_dev *input_dev; ++ unsigned short keymap[ARRAY_SIZE(appleir_key_table)]; ++ u8 *data; ++ dma_addr_t dma_buf; ++ struct usb_device *usbdev; ++ unsigned int flags; ++ struct urb *urb; ++ struct timer_list key_up_timer; ++ int current_key; ++ char phys[32]; ++}; ++ ++static DEFINE_MUTEX(appleir_mutex); ++ ++enum { ++ APPLEIR_OPENED = 0x1, ++ APPLEIR_SUSPENDED = 0x2, ++}; ++ ++static struct usb_device_id appleir_ids[] = { ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, ++ {} ++}; ++MODULE_DEVICE_TABLE(usb, appleir_ids); ++ ++static void dump_packet(struct appleir *appleir, char *msg, u8 *data, int len) ++{ ++ int i; ++ ++ printk(KERN_ERR "appleir: %s (%d bytes)", msg, len); ++ ++ for (i = 0; i < len; ++i) ++ printk(" %02x", data[i]); ++ printk("\n"); ++} ++ ++static void key_up(struct appleir *appleir, int key) ++{ ++ dbginfo(&appleir->input_dev->dev, "key %d up\n", key); ++ input_report_key(appleir->input_dev, key, 0); ++ input_sync(appleir->input_dev); ++} ++ ++static void key_down(struct appleir *appleir, int key) ++{ ++ dbginfo(&appleir->input_dev->dev, "key %d down\n", key); ++ input_report_key(appleir->input_dev, key, 1); ++ input_sync(appleir->input_dev); ++} ++ ++static void battery_flat(struct appleir *appleir) ++{ ++ dev_err(&appleir->input_dev->dev, "possible flat battery?\n"); ++} ++ ++static void key_up_tick(unsigned long data) ++{ ++ struct appleir *appleir = (struct appleir *)data; ++ ++ if (appleir->current_key) { ++ key_up(appleir, appleir->current_key); ++ 
appleir->current_key = 0; ++ } ++} ++ ++static void new_data(struct appleir *appleir, u8 *data, int len) ++{ ++ static const u8 keydown[] = { 0x25, 0x87, 0xee }; ++ static const u8 keyrepeat[] = { 0x26, }; ++ static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 }; ++ ++ if (debug) ++ dump_packet(appleir, "received", data, len); ++ ++ if (len != 5) ++ return; ++ ++ if (!memcmp(data, keydown, sizeof(keydown))) { ++ /* If we already have a key down, take it up before marking ++ this one down */ ++ if (appleir->current_key) ++ key_up(appleir, appleir->current_key); ++ appleir->current_key = appleir->keymap[(data[4] >> 1) & MAX_KEYS_MASK]; ++ ++ key_down(appleir, appleir->current_key); ++ /* Remote doesn't do key up, either pull them up, in the test ++ above, or here set a timer which pulls them up after 1/8 s */ ++ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8); ++ ++ return; ++ } ++ ++ if (!memcmp(data, keyrepeat, sizeof(keyrepeat))) { ++ key_down(appleir, appleir->current_key); ++ /* Remote doesn't do key up, either pull them up, in the test ++ above, or here set a timer which pulls them up after 1/8 s */ ++ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8); ++ return; ++ } ++ ++ if (!memcmp(data, flatbattery, sizeof(flatbattery))) { ++ battery_flat(appleir); ++ /* Fall through */ ++ } ++ ++ dump_packet(appleir, "unknown packet", data, len); ++} ++ ++static void appleir_urb(struct urb *urb) ++{ ++ struct appleir *appleir = urb->context; ++ int status = urb->status; ++ int retval; ++ ++ switch (status) { ++ case 0: ++ new_data(appleir, urb->transfer_buffer, urb->actual_length); ++ break; ++ case -ECONNRESET: ++ case -ENOENT: ++ case -ESHUTDOWN: ++ /* This urb is terminated, clean up */ ++ dbginfo(&appleir->input_dev->dev, "%s - urb shutting down with status: %d", __func__, ++ urb->status); ++ return; ++ default: ++ dbginfo(&appleir->input_dev->dev, "%s - nonzero urb status received: %d", __func__, ++ urb->status); ++ } ++ ++ retval = usb_submit_urb(urb, 
GFP_ATOMIC); ++ if (retval) ++ err("%s - usb_submit_urb failed with result %d", __func__, ++ retval); ++} ++ ++static int appleir_open(struct input_dev *dev) ++{ ++ struct appleir *appleir = input_get_drvdata(dev); ++ struct usb_interface *intf = usb_ifnum_to_if(appleir->usbdev, 0); ++ int r; ++ ++ r = usb_autopm_get_interface(intf); ++ if (r) { ++ dev_err(&intf->dev, ++ "%s(): usb_autopm_get_interface() = %d\n", __func__, r); ++ return r; ++ } ++ ++ mutex_lock(&appleir_mutex); ++ ++ if (usb_submit_urb(appleir->urb, GFP_ATOMIC)) { ++ r = -EIO; ++ goto fail; ++ } ++ ++ appleir->flags |= APPLEIR_OPENED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ usb_autopm_put_interface(intf); ++ ++ return 0; ++fail: ++ mutex_unlock(&appleir_mutex); ++ usb_autopm_put_interface(intf); ++ return r; ++} ++ ++static void appleir_close(struct input_dev *dev) ++{ ++ struct appleir *appleir = input_get_drvdata(dev); ++ ++ mutex_lock(&appleir_mutex); ++ ++ if (!(appleir->flags & APPLEIR_SUSPENDED)) { ++ usb_kill_urb(appleir->urb); ++ del_timer_sync(&appleir->key_up_timer); ++ } ++ ++ appleir->flags &= ~APPLEIR_OPENED; ++ ++ mutex_unlock(&appleir_mutex); ++} ++ ++static int appleir_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct usb_endpoint_descriptor *endpoint; ++ struct appleir *appleir = NULL; ++ struct input_dev *input_dev; ++ int retval = -ENOMEM; ++ int i; ++ ++ appleir = kzalloc(sizeof(struct appleir), GFP_KERNEL); ++ if (!appleir) ++ goto allocfail; ++ ++ appleir->data = usb_buffer_alloc(dev, URB_SIZE, GFP_KERNEL, ++ &appleir->dma_buf); ++ if (!appleir->data) ++ goto usbfail; ++ ++ appleir->urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!appleir->urb) ++ goto urbfail; ++ ++ appleir->usbdev = dev; ++ ++ input_dev = input_allocate_device(); ++ if (!input_dev) ++ goto inputfail; ++ ++ appleir->input_dev = input_dev; ++ ++ usb_make_path(dev, appleir->phys, sizeof(appleir->phys)); ++ 
strlcpy(appleir->phys, "/input0", sizeof(appleir->phys)); ++ ++ input_dev->name = "Apple Infrared Remote Controller"; ++ input_dev->phys = appleir->phys; ++ usb_to_input_id(dev, &input_dev->id); ++ input_dev->dev.parent = &intf->dev; ++ input_dev->keycode = appleir->keymap; ++ input_dev->keycodesize = sizeof(unsigned short); ++ input_dev->keycodemax = ARRAY_SIZE(appleir->keymap); ++ ++ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); ++ ++ memcpy(appleir->keymap, appleir_key_table, sizeof(appleir->keymap)); ++ for (i = 0; i < ARRAY_SIZE(appleir_key_table); i++) ++ set_bit(appleir->keymap[i], input_dev->keybit); ++ clear_bit(KEY_RESERVED, input_dev->keybit); ++ ++ input_set_drvdata(input_dev, appleir); ++ input_dev->open = appleir_open; ++ input_dev->close = appleir_close; ++ ++ endpoint = &intf->cur_altsetting->endpoint[0].desc; ++ ++ usb_fill_int_urb(appleir->urb, dev, ++ usb_rcvintpipe(dev, endpoint->bEndpointAddress), ++ appleir->data, 8, ++ appleir_urb, appleir, endpoint->bInterval); ++ ++ appleir->urb->transfer_dma = appleir->dma_buf; ++ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ ++ setup_timer(&appleir->key_up_timer, ++ key_up_tick, (unsigned long) appleir); ++ ++ retval = input_register_device(appleir->input_dev); ++ if (retval) ++ goto inputfail; ++ ++ usb_set_intfdata(intf, appleir); ++ ++ return 0; ++ ++inputfail: ++ input_free_device(appleir->input_dev); ++ ++urbfail: ++ usb_free_urb(appleir->urb); ++ ++usbfail: ++ usb_buffer_free(dev, URB_SIZE, appleir->data, ++ appleir->dma_buf); ++ ++allocfail: ++ kfree(appleir); ++ ++ return retval; ++} ++ ++static void appleir_disconnect(struct usb_interface *intf) ++{ ++ struct appleir *appleir = usb_get_intfdata(intf); ++ ++ usb_set_intfdata(intf, NULL); ++ input_unregister_device(appleir->input_dev); ++ usb_free_urb(appleir->urb); ++ usb_buffer_free(interface_to_usbdev(intf), URB_SIZE, ++ appleir->data, appleir->dma_buf); ++ kfree(appleir); ++} ++ ++static int appleir_suspend(struct usb_interface 
*interface, ++ pm_message_t message) ++{ ++ struct appleir *appleir = usb_get_intfdata(interface); ++ ++ mutex_lock(&appleir_mutex); ++ if (appleir->flags & APPLEIR_OPENED) ++ usb_kill_urb(appleir->urb); ++ ++ appleir->flags |= APPLEIR_SUSPENDED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ return 0; ++} ++ ++static int appleir_resume(struct usb_interface *interface) ++{ ++ struct appleir *appleir; ++ int r = 0; ++ ++ appleir = usb_get_intfdata(interface); ++ ++ mutex_lock(&appleir_mutex); ++ if (appleir->flags & APPLEIR_OPENED) { ++ struct usb_endpoint_descriptor *endpoint; ++ ++ endpoint = &interface->cur_altsetting->endpoint[0].desc; ++ usb_fill_int_urb(appleir->urb, appleir->usbdev, ++ usb_rcvintpipe(appleir->usbdev, endpoint->bEndpointAddress), ++ appleir->data, 8, ++ appleir_urb, appleir, endpoint->bInterval); ++ appleir->urb->transfer_dma = appleir->dma_buf; ++ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ ++ /* And reset the USB device */ ++ if (usb_submit_urb(appleir->urb, GFP_ATOMIC)) ++ r = -EIO; ++ } ++ ++ appleir->flags &= ~APPLEIR_SUSPENDED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ return r; ++} ++ ++static struct usb_driver appleir_driver = { ++ .name = "appleir", ++ .probe = appleir_probe, ++ .disconnect = appleir_disconnect, ++ .suspend = appleir_suspend, ++ .resume = appleir_resume, ++ .reset_resume = appleir_resume, ++ .id_table = appleir_ids, ++}; ++ ++static int __init appleir_init(void) ++{ ++ return usb_register(&appleir_driver); ++} ++ ++static void __exit appleir_exit(void) ++{ ++ usb_deregister(&appleir_driver); ++} ++ ++module_init(appleir_init); ++module_exit(appleir_exit); +diff --git a/include/linux/hid.h b/include/linux/hid.h +index b1344ec..f1f2b6f 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -308,11 +308,13 @@ struct hid_item { + #define HID_QUIRK_NOTOUCH 0x00000002 + #define HID_QUIRK_IGNORE 0x00000004 + #define HID_QUIRK_NOGET 0x00000008 ++#define HID_QUIRK_HIDDEV_FORCE 0x00000010 + #define 
HID_QUIRK_BADPAD 0x00000020 + #define HID_QUIRK_MULTI_INPUT 0x00000040 + #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 + #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 + #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 ++#define HID_QUIRK_NO_IGNORE 0x40000000 + + /* + * This is the global environment of the parser. This information is +-- +1.7.0.1 + diff --git a/ata-generic-handle-new-mbp-with-mcp89.patch b/ata-generic-handle-new-mbp-with-mcp89.patch new file mode 100644 index 000000000..34ff200e0 --- /dev/null +++ b/ata-generic-handle-new-mbp-with-mcp89.patch @@ -0,0 +1,79 @@ +From: Tejun Heo +Date: Thu, 17 Jun 2010 09:42:22 +0000 (+0200) +Subject: ahci,ata_generic: let ata_generic handle new MBP w/ MCP89 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=c6353b4520788e34098bbf61c73fb9618ca7fdd6 + +ahci,ata_generic: let ata_generic handle new MBP w/ MCP89 + +For yet unknown reason, MCP89 on MBP 7,1 doesn't work w/ ahci under +linux but the controller doesn't require explicit mode setting and +works fine with ata_generic. Make ahci ignore the controller on MBP +7,1 and let ata_generic take it for now. + +Reported in bko#15923. + + https://bugzilla.kernel.org/show_bug.cgi?id=15923 + +NVIDIA is investigating why ahci mode doesn't work. + +Signed-off-by: Tejun Heo +Cc: Peer Chen +Cc: stable@kernel.org +Reported-by: Anders Østhus +Reported-by: Andreas Graf +Reported-by: Benoit Gschwind +Reported-by: Damien Cassou +Reported-by: tixetsal@juno.com +Signed-off-by: Jeff Garzik +--- + +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index 8ca16f5..f252253 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -1053,6 +1053,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) + return -ENODEV; + ++ /* ++ * For some reason, MCP89 on MacBook 7,1 doesn't work with ++ * ahci, use ata_generic instead. 
++ */ ++ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && ++ pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA && ++ pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && ++ pdev->subsystem_device == 0xcb89) ++ return -ENODEV; ++ + /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. + * At the moment, we can only use the AHCI mode. Let the users know + * that for SAS drives they're out of luck. +diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c +index 573158a..d4ccf74 100644 +--- a/drivers/ata/ata_generic.c ++++ b/drivers/ata/ata_generic.c +@@ -168,6 +168,12 @@ static struct pci_device_id ata_generic[] = { + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, + { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, + { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), }, ++ /* ++ * For some reason, MCP89 on MacBook 7,1 doesn't work with ++ * ahci, use ata_generic instead. ++ */ ++ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA, ++ PCI_VENDOR_ID_APPLE, 0xcb89, }, + #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h +index 4eb4679..3bedcc1 100644 +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -1261,6 +1261,7 @@ + #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 + #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 + #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 ++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 + + #define PCI_VENDOR_ID_IMS 0x10e0 + #define PCI_DEVICE_ID_IMS_TT128 0x9128 diff --git a/ata-generic-implement-ata-gen-flags.patch b/ata-generic-implement-ata-gen-flags.patch new file mode 100644 index 000000000..6d7a498cd --- /dev/null +++ b/ata-generic-implement-ata-gen-flags.patch @@ -0,0 +1,114 @@ +From: Tejun 
Heo +Date: Tue, 22 Jun 2010 10:27:26 +0000 (+0200) +Subject: ata_generic: implement ATA_GEN_* flags and force enable DMA on MBP 7,1 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=1529c69adce1e95f7ae72f0441590c226bbac7fc + +ata_generic: implement ATA_GEN_* flags and force enable DMA on MBP 7,1 + +[[ cebbert@redhat.com : backport to 2.6.34 ]] + +IDE mode of MCP89 on MBP 7,1 doesn't set DMA enable bits in the BMDMA +status register. Make the following changes to work around the problem. + +* Instead of using hard coded 1 in id->driver_data as class code + match, use ATA_GEN_CLASS_MATCH and carry the matched id in + host->private_data. + +* Instead of matching PCI_VENDOR_ID_CENATEK, use ATA_GEN_FORCE_DMA + flag in id instead. + +* Add ATA_GEN_FORCE_DMA to the id entry of MBP 7,1. + +Signed-off-by: Tejun Heo +Cc: Peer Chen +Cc: stable@kernel.org +Reported-by: Anders Østhus +Reported-by: Andreas Graf +Reported-by: Benoit Gschwind +Reported-by: Damien Cassou +Reported-by: tixetsal@juno.com +Signed-off-by: Jeff Garzik +--- + +diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c +index d4ccf74..7107a69 100644 +--- a/drivers/ata/ata_generic.c ++++ b/drivers/ata/ata_generic.c +@@ -32,6 +32,11 @@ + * A generic parallel ATA driver using libata + */ + ++enum { ++ ATA_GEN_CLASS_MATCH = (1 << 0), ++ ATA_GEN_FORCE_DMA = (1 << 1), ++}; ++ + /** + * generic_set_mode - mode setting + * @link: link to set up +@@ -46,13 +51,17 @@ + static int generic_set_mode(struct ata_link *link, struct ata_device **unused) + { + struct ata_port *ap = link->ap; ++ const struct pci_device_id *id = ap->host->private_data; + int dma_enabled = 0; + struct ata_device *dev; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + +- /* Bits 5 and 6 indicate if DMA is active on master/slave */ +- if (ap->ioaddr.bmdma_addr) ++ if (id->driver_data & ATA_GEN_FORCE_DMA) { ++ dma_enabled = 0xff; ++ } else if (ap->ioaddr.bmdma_addr) { ++ /* Bits 
5 and 6 indicate if DMA is active on master/slave */ + dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); ++ } + + if (pdev->vendor == PCI_VENDOR_ID_CENATEK) + dma_enabled = 0xFF; +@@ -126,7 +135,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id + const struct ata_port_info *ppi[] = { &info, NULL }; + + /* Don't use the generic entry unless instructed to do so */ +- if (id->driver_data == 1 && all_generic_ide == 0) ++ if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0) + return -ENODEV; + + /* Devices that need care */ +@@ -155,7 +164,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id + return rc; + pcim_pin_device(dev); + } +- return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0); ++ return ata_pci_sff_init_one(dev, ppi, &generic_sht, (void *)id, 0); + } + + static struct pci_device_id ata_generic[] = { +@@ -167,13 +176,15 @@ static struct pci_device_id ata_generic[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, + { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, +- { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), ++ .driver_data = ATA_GEN_FORCE_DMA }, + /* + * For some reason, MCP89 on MacBook 7,1 doesn't work with + * ahci, use ata_generic instead. 
+ */ + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA, +- PCI_VENDOR_ID_APPLE, 0xcb89, }, ++ PCI_VENDOR_ID_APPLE, 0xcb89, ++ .driver_data = ATA_GEN_FORCE_DMA }, + #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, +@@ -181,7 +192,8 @@ static struct pci_device_id ata_generic[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, + #endif + /* Must come last. If you add entries adjust this table appropriately */ +- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1}, ++ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL), ++ .driver_data = ATA_GEN_CLASS_MATCH }, + { 0, }, + }; + diff --git a/btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch b/btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch new file mode 100644 index 000000000..89b67a762 --- /dev/null +++ b/btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch @@ -0,0 +1,42 @@ +From: Shi Weihua +Date: Tue, 18 May 2010 00:51:54 +0000 (+0000) +Subject: Btrfs: prohibit a operation of changing acl's mask when noacl mount option used +X-Git-Tag: v2.6.35-rc3~3^2~3 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=731e3d1b + +Btrfs: prohibit a operation of changing acl's mask when noacl mount option used + +when used Posix File System Test Suite(pjd-fstest) to test btrfs, +some cases about setfacl failed when noacl mount option used. +I simplified used commands in pjd-fstest, and the following steps +can reproduce it. +------------------------ +# cd btrfs-part/ +# mkdir aaa +# setfacl -m m::rw aaa <- successed, but not expected by pjd-fstest. 
+------------------------ +I checked ext3, a warning message occured, like as: + setfacl: aaa/: Operation not supported +Certainly, it's expected by pjd-fstest. + +So, i compared acl.c of btrfs and ext3. Based on that, a patch created. +Fortunately, it works. + +Signed-off-by: Shi Weihua +Signed-off-by: Chris Mason +--- + +diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c +index 6b4d0cc..a372985 100644 +--- a/fs/btrfs/acl.c ++++ b/fs/btrfs/acl.c +@@ -163,6 +163,9 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, + if (!is_owner_or_cap(dentry->d_inode)) + return -EPERM; + ++ if (!IS_POSIXACL(dentry->d_inode)) ++ return -EOPNOTSUPP; ++ + if (value) { + acl = posix_acl_from_xattr(value, size); + if (acl == NULL) { diff --git a/cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch b/cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch new file mode 100644 index 000000000..2dd25ad35 --- /dev/null +++ b/cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch @@ -0,0 +1,200 @@ +From: David Howells +Date: Thu, 22 Jul 2010 11:53:18 +0000 (+0100) +Subject: CIFS: Fix a malicious redirect problem in the DNS lookup code +X-Git-Tag: v2.6.35-rc6~6 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=4c0c03ca54f72fdd5912516ad0a23ec5cf01bda7 + +CIFS: Fix a malicious redirect problem in the DNS lookup code + +Fix the security problem in the CIFS filesystem DNS lookup code in which a +malicious redirect could be installed by a random user by simply adding a +result record into one of their keyrings with add_key() and then invoking a +CIFS CFS lookup [CVE-2010-2524]. + +This is done by creating an internal keyring specifically for the caching of +DNS lookups. 
To enforce the use of this keyring, the module init routine +creates a set of override credentials with the keyring installed as the thread +keyring and instructs request_key() to only install lookup result keys in that +keyring. + +The override is then applied around the call to request_key(). + +This has some additional benefits when a kernel service uses this module to +request a key: + + (1) The result keys are owned by root, not the user that caused the lookup. + + (2) The result keys don't pop up in the user's keyrings. + + (3) The result keys don't come out of the quota of the user that caused the + lookup. + +The keyring can be viewed as root by doing cat /proc/keys: + +2a0ca6c3 I----- 1 perm 1f030000 0 0 keyring .dns_resolver: 1/4 + +It can then be listed with 'keyctl list' by root. + + # keyctl list 0x2a0ca6c3 + 1 key in keyring: + 726766307: --alswrv 0 0 dns_resolver: foo.bar.com + +Signed-off-by: David Howells +Reviewed-and-Tested-by: Jeff Layton +Acked-by: Steve French +Signed-off-by: Linus Torvalds +--- + +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 484e52b..2cb1a70 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -923,7 +923,7 @@ init_cifs(void) + goto out_unregister_filesystem; + #endif + #ifdef CONFIG_CIFS_DFS_UPCALL +- rc = register_key_type(&key_type_dns_resolver); ++ rc = cifs_init_dns_resolver(); + if (rc) + goto out_unregister_key_type; + #endif +@@ -935,7 +935,7 @@ init_cifs(void) + + out_unregister_resolver_key: + #ifdef CONFIG_CIFS_DFS_UPCALL +- unregister_key_type(&key_type_dns_resolver); ++ cifs_exit_dns_resolver(); + out_unregister_key_type: + #endif + #ifdef CONFIG_CIFS_UPCALL +@@ -961,7 +961,7 @@ exit_cifs(void) + cifs_proc_clean(); + #ifdef CONFIG_CIFS_DFS_UPCALL + cifs_dfs_release_automount_timer(); +- unregister_key_type(&key_type_dns_resolver); ++ cifs_exit_dns_resolver(); + #endif + #ifdef CONFIG_CIFS_UPCALL + unregister_key_type(&cifs_spnego_key_type); +diff --git a/fs/cifs/dns_resolve.c 
b/fs/cifs/dns_resolve.c +index 4db2c5e..49315cb 100644 +--- a/fs/cifs/dns_resolve.c ++++ b/fs/cifs/dns_resolve.c +@@ -24,12 +24,16 @@ + */ + + #include ++#include ++#include + #include + #include "dns_resolve.h" + #include "cifsglob.h" + #include "cifsproto.h" + #include "cifs_debug.h" + ++static const struct cred *dns_resolver_cache; ++ + /* Checks if supplied name is IP address + * returns: + * 1 - name is IP +@@ -94,6 +98,7 @@ struct key_type key_type_dns_resolver = { + int + dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) + { ++ const struct cred *saved_cred; + int rc = -EAGAIN; + struct key *rkey = ERR_PTR(-EAGAIN); + char *name; +@@ -133,8 +138,15 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) + goto skip_upcall; + } + ++ saved_cred = override_creds(dns_resolver_cache); + rkey = request_key(&key_type_dns_resolver, name, ""); ++ revert_creds(saved_cred); + if (!IS_ERR(rkey)) { ++ if (!(rkey->perm & KEY_USR_VIEW)) { ++ down_read(&rkey->sem); ++ rkey->perm |= KEY_USR_VIEW; ++ up_read(&rkey->sem); ++ } + len = rkey->type_data.x[0]; + data = rkey->payload.data; + } else { +@@ -165,4 +177,61 @@ out: + return rc; + } + ++int __init cifs_init_dns_resolver(void) ++{ ++ struct cred *cred; ++ struct key *keyring; ++ int ret; ++ ++ printk(KERN_NOTICE "Registering the %s key type\n", ++ key_type_dns_resolver.name); ++ ++ /* create an override credential set with a special thread keyring in ++ * which DNS requests are cached ++ * ++ * this is used to prevent malicious redirections from being installed ++ * with add_key(). 
++ */ ++ cred = prepare_kernel_cred(NULL); ++ if (!cred) ++ return -ENOMEM; ++ ++ keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred, ++ (KEY_POS_ALL & ~KEY_POS_SETATTR) | ++ KEY_USR_VIEW | KEY_USR_READ, ++ KEY_ALLOC_NOT_IN_QUOTA); ++ if (IS_ERR(keyring)) { ++ ret = PTR_ERR(keyring); ++ goto failed_put_cred; ++ } ++ ++ ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL); ++ if (ret < 0) ++ goto failed_put_key; ++ ++ ret = register_key_type(&key_type_dns_resolver); ++ if (ret < 0) ++ goto failed_put_key; ++ ++ /* instruct request_key() to use this special keyring as a cache for ++ * the results it looks up */ ++ cred->thread_keyring = keyring; ++ cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; ++ dns_resolver_cache = cred; ++ return 0; ++ ++failed_put_key: ++ key_put(keyring); ++failed_put_cred: ++ put_cred(cred); ++ return ret; ++} + ++void __exit cifs_exit_dns_resolver(void) ++{ ++ key_revoke(dns_resolver_cache->thread_keyring); ++ unregister_key_type(&key_type_dns_resolver); ++ put_cred(dns_resolver_cache); ++ printk(KERN_NOTICE "Unregistered %s key type\n", ++ key_type_dns_resolver.name); ++} +diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h +index 966e928..26b9eaa 100644 +--- a/fs/cifs/dns_resolve.h ++++ b/fs/cifs/dns_resolve.h +@@ -24,8 +24,8 @@ + #define _DNS_RESOLVE_H + + #ifdef __KERNEL__ +-#include +-extern struct key_type key_type_dns_resolver; ++extern int __init cifs_init_dns_resolver(void); ++extern void __exit cifs_exit_dns_resolver(void); + extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); + #endif /* KERNEL */ + diff --git a/config-arm b/config-arm new file mode 100644 index 000000000..ebc5d6c95 --- /dev/null +++ b/config-arm @@ -0,0 +1,110 @@ +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +# CONFIG_SMP is not set + +CONFIG_CC_OPTIMIZE_FOR_SIZE=y + +CONFIG_ARCH_VERSATILE=y +CONFIG_ARCH_VERSATILE_PB=y +CONFIG_MACH_VERSATILE_AB=y + +CONFIG_HIGHMEM=y +# CONFIG_HIGHPTE is not set + +# 
CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_WRITETHROUGH is not set +# CONFIG_CPU_CACHE_ROUND_ROBIN is not set + +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZBOOT_ROM_BSS=0 + +# CONFIG_XIP_KERNEL is not set + +CONFIG_ATAGS_PROC=y + +# CONFIG_FPE_NWFPE is not set +CONFIG_FPE_FASTFPE=y +CONFIG_VFP=y + +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_PM_TRACE is not set +CONFIG_SUSPEND=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_APM_EMULATION=y + +CONFIG_ARM_THUMB=y + +CONFIG_AEABI=y +CONFIG_OABI_COMPAT=y + +# CONFIG_UACCESS_WITH_MEMCPY is not set + +CONFIG_CMDLINE="console=ttyAM0,115200 root=/dev/sda1 rootdelay=20" + +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# CONFIG_CPU_IDLE is not set + +CONFIG_LEDS=y +CONFIG_LEDS_CPU=y + +CONFIG_MTD_AFS_PARTS=y +CONFIG_MTD_ARM_INTEGRATOR=y +CONFIG_MTD_IMPA7=y + +CONFIG_AX88796=m +CONFIG_AX88796_93CX6=y +CONFIG_SMC91X=m +CONFIG_DM9000=m +CONFIG_DM9000_DEBUGLEVEL=4 +# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set +CONFIG_SMC911X=m +CONFIG_SMSC911X=m + +CONFIG_SERIO_AMBAKMI=m + +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y + +CONFIG_I2C_VERSATILE=y + +CONFIG_THERMAL=y + +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set + +CONFIG_FB_ARMCLCD=m + +CONFIG_SND_ARM=y +CONFIG_SND_ARMAACI=m + +CONFIG_USB_MUSB_HDRC=m +# CONFIG_MUSB_PIO_ONLY is not set +CONFIG_USB_TUSB6010=y +# CONFIG_USB_MUSB_DEBUG is not set + +CONFIG_MMC_ARMMMCI=m + +CONFIG_RTC_DRV_PL030=m +CONFIG_RTC_DRV_PL031=m + +# CONFIG_SGI_IOC4 is not set + +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_LL is not set + +CONFIG_ARM_UNWIND=y + +CONFIG_RCU_FANOUT=32 + +# CONFIG_USB_ULPI is not set +# CONFIG_OC_ETM is not set + +# CONFIG_MTD_PISMO is not set + +CONFIG_PERF_EVENTS=y +CONFIG_PERF_COUNTERS=y diff --git a/config-debug b/config-debug new file mode 100644 index 000000000..d66f25158 --- /dev/null +++ b/config-debug @@ -0,0 +1,83 @@ 
+CONFIG_SND_VERBOSE_PRINTK=y +CONFIG_SND_DEBUG=y +CONFIG_SND_PCM_XRUN_DEBUG=y + +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_PROVE_LOCKING=y +CONFIG_DEBUG_VM=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_PROVE_RCU=y + +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_FAIL_IO_TIMEOUT=y + +CONFIG_SLUB_DEBUG_ON=y + +CONFIG_LOCK_STAT=y + +CONFIG_DEBUG_STACK_USAGE=y + +CONFIG_ACPI_DEBUG=y +# CONFIG_ACPI_DEBUG_FUNC_TRACE is not set + +CONFIG_DEBUG_SG=y + +# CONFIG_DEBUG_PAGEALLOC is not set + +CONFIG_DEBUG_WRITECOUNT=y +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 + +CONFIG_X86_PTDUMP=y + +CONFIG_CAN_DEBUG_DEVICES=y + +CONFIG_MODULE_FORCE_UNLOAD=y + +CONFIG_SYSCTL_SYSCALL_CHECK=y + +CONFIG_DEBUG_NOTIFIERS=y + +CONFIG_DMA_API_DEBUG=y + +CONFIG_MMIOTRACE=y + +CONFIG_DEBUG_CREDENTIALS=y + +CONFIG_EXT4_DEBUG=y + +CONFIG_DEBUG_PERF_USE_VMALLOC=y + +# off in both production debug and nodebug builds, +# on in rawhide nodebug builds +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set + +CONFIG_JBD2_DEBUG=y + +CONFIG_DEBUG_CFQ_IOSCHED=y + +CONFIG_DRBD_FAULT_INJECTION=y + +CONFIG_ATH_DEBUG=y +CONFIG_IWLWIFI_DEVICE_TRACING=y + +CONFIG_DEBUG_OBJECTS_WORK=y +# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set + +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y + +CONFIG_PM_ADVANCED_DEBUG=y + +CONFIG_CEPH_FS_PRETTYDEBUG=y +CONFIG_QUOTA_DEBUG=y diff --git a/config-generic b/config-generic new file mode 100644 index 000000000..67dfbad0b --- /dev/null +++ b/config-generic @@ -0,0 +1,4176 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_MMU=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_LOCALVERSION="" + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_HOTPLUG=y 
+CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +CONFIG_BUILD_DOCSRC=y + +# +# General setup +# +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_SYSCTL=y +CONFIG_LOG_BUF_SHIFT=17 +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_EXTRA_PASS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_DEFAULT_CFQ=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_NET_NS=y + +CONFIG_POSIX_MQUEUE=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +CONFIG_SLUB=y +# CONFIG_SLUB_STATS is not set + +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_IWMC3200TOP=m +# CONFIG_IWMC3200TOP_DEBUG is not set +CONFIG_IWMC3200TOP_DEBUGFS=y + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_LOAD is not set +# -- MODULE_FORCE_UNLOAD is controlled by config-debug/nodebug +# CONFIG_MODVERSIONS is not set +CONFIG_MODULE_SRCVERSION_ALL=y + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +CONFIG_PCI=y +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_IOV=y +CONFIG_HT_IRQ=y +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_DEFAULT_ON=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIE_ECRC=y +CONFIG_PCIEAER_INJECT=m +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HOTPLUG_PCI_FAKE=m +CONFIG_PCI_LEGACY=y + +CONFIG_ISA=y +# CONFIG_EISA is not set +# CONFIG_MCA is not set +# CONFIG_SCx200 is not set + +# +# PCMCIA/CardBus support +# +CONFIG_PCMCIA=y +CONFIG_PCMCIA_LOAD_CIS=y +# CONFIG_PCMCIA_DEBUG is 
not set +CONFIG_YENTA=m +CONFIG_CARDBUS=y +CONFIG_I82092=m +CONFIG_PD6729=m +CONFIG_PCMCIA_IOCTL=y + +CONFIG_PCCARD=y +CONFIG_MMC=m +CONFIG_MMC_BLOCK_BOUNCE=y +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_UNSAFE_RESUME is not set +CONFIG_MMC_BLOCK=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDRICOH_CS=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_WBSD=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_CB710=m +CONFIG_MMC_RICOH_MMC=y + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set + +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_DEBUG=y +CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_IPATH=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_AMSO1100=m +# CONFIG_INFINIBAND_AMSO1100_DEBUG is not set +CONFIG_INFINIBAND_CXGB3=m +# CONFIG_INFINIBAND_CXGB3_DEBUG is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_INFINIBAND_NES=m +# CONFIG_INFINIBAND_NES_DEBUG is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_BINFMT_AOUT is not set +CONFIG_BINFMT_MISC=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" + +# CONFIG_SPI is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=m +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_AR7_PARTS=m +CONFIG_MTD_CONCAT=m +CONFIG_MTD_CMDLINE_PARTS=y +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_BLOCK_RO=m +CONFIG_MTD_BLOCK2MTD=m + +CONFIG_MTD_OOPS=m +# CONFIG_MTD_INTEL_VR_NOR is not set +CONFIG_MTD_ALAUDA=m + +CONFIG_FTL=m +CONFIG_NFTL=m +CONFIG_NFTL_RW=y +CONFIG_INFTL=m +CONFIG_RFD_FTL=m +CONFIG_SSFDC=m + 
+CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_RESERVE=1 +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_DEBUG is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +CONFIG_MTD_ABSENT=m + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +# CONFIG_MTD_PHYSMAP is not set +CONFIG_MTD_SC520CDP=m +CONFIG_MTD_NETSC520=m +# CONFIG_MTD_SBC_GXX is not set +# CONFIG_MTD_SCx200_DOCFLASH is not set +# CONFIG_MTD_AMD76XROM is not set +CONFIG_MTD_SCB2_FLASH=m +# CONFIG_MTD_NETtel is not set +# CONFIG_MTD_DILNETPC is not set +# CONFIG_MTD_L440GX is not set +CONFIG_MTD_PCI=m +CONFIG_MTD_TS5500=m +# CONFIG_MTD_GPIO_ADDR is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_PMC551=m +# CONFIG_MTD_PMC551_BUGFIX is not set +# CONFIG_MTD_PMC551_DEBUG is not set +# CONFIG_MTD_SLRAM is not set +CONFIG_MTD_MTDRAM=m +CONFIG_MTDRAM_TOTAL_SIZE=4096 +CONFIG_MTDRAM_ERASE_SIZE=128 + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_DOCPROBE is not set +# CONFIG_MTD_DOCPROBE_ADVANCED is not set +# CONFIG_MTD_DOCPROBE_ADDRESS is not set + +# +# NAND Flash Device Drivers +# +CONFIG_MTD_NAND=m +# CONFIG_MTD_NAND_MUSEUM_IDS is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_NAND_CAFE is not set +CONFIG_MTD_NAND_IDS=m +CONFIG_MTD_NAND_NANDSIM=m +# CONFIG_MTD_ONENAND is not set +CONFIG_MTD_NAND_ECC_SMC=y +CONFIG_MTD_NAND_CS553X=m + +CONFIG_MTD_REDBOOT_PARTS=m +# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set +# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 +# CONFIG_MTD_XIP is not set +# CONFIG_MTD_ICHXROM is not set +# 
CONFIG_MTD_PHRAM is not set +CONFIG_MTD_NAND_DISKONCHIP=m +# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set +CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0 +# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set +# CONFIG_MTD_PLATRAM is not set + +# CONFIG_MTD_TESTS is not set +CONFIG_MTD_LPDDR=m +CONFIG_MTD_QINFO_PROBE=m + +# +# Parallel port support +# +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +# CONFIG_PARPORT_PC_FIFO is not set +# CONFIG_PARPORT_PC_SUPERIO is not set +CONFIG_PARPORT_PC_PCMCIA=m +CONFIG_PARPORT_1284=y +# CONFIG_PARPORT_AX88796 is not set + +# +# Plug and Play support +# +CONFIG_PNP=y +# CONFIG_PNP_DEBUG is not set +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_ISAPNP=y +# CONFIG_PNPBIOS is not set + +CONFIG_ACPI_PCI_SLOT=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m + +# +# Block devices +# +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_FD=m +# CONFIG_BLK_DEV_XD is not set +CONFIG_PARIDE=m +CONFIG_PARIDE_PD=m +CONFIG_PARIDE_PCD=m +CONFIG_PARIDE_PF=m +CONFIG_PARIDE_PT=m +CONFIG_PARIDE_PG=m +CONFIG_PARIDE_ATEN=m +CONFIG_PARIDE_BPCK=m +CONFIG_PARIDE_BPCK6=m +CONFIG_PARIDE_COMM=m +CONFIG_PARIDE_DSTR=m +CONFIG_PARIDE_FIT2=m +CONFIG_PARIDE_FIT3=m +CONFIG_PARIDE_EPAT=m +CONFIG_PARIDE_EPATC8=y +CONFIG_PARIDE_EPIA=m +CONFIG_PARIDE_FRIQ=m +CONFIG_PARIDE_FRPW=m +CONFIG_PARIDE_KBIC=m +CONFIG_PARIDE_KTTI=m +CONFIG_PARIDE_ON20=m +CONFIG_PARIDE_ON26=m +CONFIG_BLK_CPQ_DA=m +CONFIG_BLK_CPQ_CISS_DA=m +CONFIG_CISS_SCSI_TAPE=y +CONFIG_BLK_DEV_DAC960=m +CONFIG_BLK_DEV_UMEM=m +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_OSD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_ATIIXP=y +CONFIG_LBD=y +CONFIG_BLK_DEV_IO_TRACE=y + +CONFIG_BLK_DEV_DELKIN=m +# CONFIG_BLK_DEV_IT8213 is not set +# CONFIG_BLK_DEV_TC86C001 is not set +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_INTEGRITY=y + + +# +# 
ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +CONFIG_BLK_DEV_IDECS=m +CONFIG_BLK_DEV_IDECD=m +# CONFIG_BLK_DEV_IDETAPE is not set +CONFIG_IDE_TASK_IOCTL=y +# CONFIG_BLK_DEV_IDE_SATA is not set + +# +# IDE chipset support/bugfixes +# +CONFIG_BLK_DEV_CMD640=y +CONFIG_BLK_DEV_CMD640_ENHANCED=y +CONFIG_BLK_DEV_IDEPNP=y +CONFIG_BLK_DEV_IDEPCI=y +# CONFIG_BLK_DEV_OFFBOARD is not set +CONFIG_BLK_DEV_GENERIC=y +# CONFIG_BLK_DEV_OPTI621 is not set +CONFIG_BLK_DEV_RZ1000=y +CONFIG_BLK_DEV_IDEDMA_PCI=y +CONFIG_BLK_DEV_AEC62XX=y +CONFIG_BLK_DEV_ALI15X3=y +# CONFIG_BLK_DEV_AMD74XX is not set +CONFIG_BLK_DEV_CMD64X=y +CONFIG_BLK_DEV_TRIFLEX=y +# CONFIG_BLK_DEV_CY82C693 is not set +CONFIG_BLK_DEV_CS5520=y +CONFIG_BLK_DEV_CS5530=y +CONFIG_BLK_DEV_CS5535=y +CONFIG_BLK_DEV_HPT366=y +CONFIG_BLK_DEV_IT821X=y +CONFIG_BLK_DEV_JMICRON=y +# CONFIG_BLK_DEV_SC1200 is not set +CONFIG_BLK_DEV_PIIX=y +# CONFIG_BLK_DEV_NS87415 is not set +CONFIG_BLK_DEV_PDC202XX_OLD=y +CONFIG_BLK_DEV_PDC202XX_NEW=y +CONFIG_BLK_DEV_SVWKS=y +CONFIG_BLK_DEV_SIIMAGE=y +CONFIG_BLK_DEV_SIS5513=y +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +CONFIG_BLK_DEV_VIA82CXXX=y +CONFIG_BLK_DEV_IDEDMA=y +# CONFIG_BLK_DEV_HD is not set + +CONFIG_VIRTIO=m +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_RING=m +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_NET=m +CONFIG_VMXNET3=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_VHOST_NET=m + +# +# SCSI device support +# +CONFIG_SCSI=y + +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_PROC_FS=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SRP=m +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_TGT=m + +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=y 
+CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m + +# +# Some SCSI devices (e.g. CD jukebox) support multiple LUNs +# +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_FC_TGT_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_RAID_ATTRS=m + +CONFIG_ISCSI_TCP=m + +# +# SCSI low-level drivers +# +CONFIG_SCSI_LOWLEVEL=y +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_3W_9XXX=m +# CONFIG_SCSI_7000FASST is not set +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_SCSI_AIC94XX=m +# CONFIG_AIC94XX_DEBUG is not set +CONFIG_AIC7XXX_CMDS_PER_DEVICE=4 +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 +# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set +CONFIG_AIC7XXX_DEBUG_MASK=0 +# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set +CONFIG_SCSI_AIC7XXX_OLD=m +CONFIG_SCSI_AIC79XX=m +CONFIG_AIC79XX_CMDS_PER_DEVICE=4 +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +# CONFIG_AIC79XX_BUILD_FIRMWARE is not set +# CONFIG_AIC79XX_DEBUG_ENABLE is not set +CONFIG_AIC79XX_DEBUG_MASK=0 +# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set +# CONFIG_SCSI_ADVANSYS is not set +CONFIG_SCSI_BFA_FC=m +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MVSAS=m +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS_LOGGING=y + +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +# CONFIG_SCSI_OSD_DEBUG is not set + +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_PMCRAID=m + +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_PM8001=m +CONFIG_VMWARE_PVSCSI=m +CONFIG_VMWARE_BALLOON=m + +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y 
+CONFIG_ATA_SFF=y +CONFIG_ATA_PIIX=y +CONFIG_ATA_ACPI=y +CONFIG_BLK_DEV_SX8=m +CONFIG_PDC_ADMA=m +CONFIG_SATA_AHCI=y +CONFIG_SATA_INIC162X=m +CONFIG_SATA_MV=m +CONFIG_SATA_NV=m +CONFIG_SATA_PMP=y +CONFIG_SATA_PROMISE=m +CONFIG_SATA_QSTOR=m +CONFIG_SATA_SIL=m +CONFIG_SATA_SIL24=m +CONFIG_SATA_SIS=m +CONFIG_SATA_SVW=m +CONFIG_SATA_SX4=m +CONFIG_SATA_ULI=m +CONFIG_SATA_VIA=m +CONFIG_SATA_VITESSE=m + +CONFIG_PATA_ACPI=m +CONFIG_PATA_ALI=m +CONFIG_PATA_AMD=m +CONFIG_PATA_ARTOP=m +CONFIG_PATA_ATIIXP=m +CONFIG_PATA_CMD640_PCI=m +CONFIG_PATA_CMD64X=m +CONFIG_PATA_CS5520=m +CONFIG_PATA_CS5530=m +CONFIG_PATA_CS5535=m +CONFIG_PATA_CS5536=m +CONFIG_PATA_CYPRESS=m +CONFIG_PATA_EFAR=m +CONFIG_ATA_GENERIC=m +CONFIG_PATA_HPT366=m +CONFIG_PATA_HPT37X=m +CONFIG_PATA_HPT3X2N=m +CONFIG_PATA_HPT3X3=m +# CONFIG_PATA_HPT3X3_DMA is not set +# CONFIG_PATA_ISAPNP is not set +CONFIG_PATA_IT821X=m +CONFIG_PATA_IT8213=m +CONFIG_PATA_JMICRON=m +# CONFIG_PATA_LEGACY is not set +CONFIG_PATA_NINJA32=m +CONFIG_PATA_MARVELL=m +# CONFIG_PATA_WINBOND_VLB is not set +CONFIG_PATA_MPIIX=m +CONFIG_PATA_NETCELL=m +CONFIG_PATA_NS87410=m +CONFIG_PATA_NS87415=m +CONFIG_PATA_OLDPIIX=m +CONFIG_PATA_OPTI=m +CONFIG_PATA_OPTIDMA=m +CONFIG_PATA_PCMCIA=m +CONFIG_PATA_PDC_OLD=m +CONFIG_PATA_QDI=m +# CONFIG_PATA_RADISYS is not set +CONFIG_PATA_RDC=m +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_SC1200 is not set +CONFIG_PATA_SERVERWORKS=m +CONFIG_PATA_PDC2027X=m +CONFIG_PATA_SCH=m +CONFIG_PATA_SIL680=m +CONFIG_PATA_SIS=m +CONFIG_PATA_TOSHIBA=m +CONFIG_PATA_TRIFLEX=m +CONFIG_PATA_VIA=m +CONFIG_PATA_WINBOND=m +CONFIG_PATA_ATP867X=m + +CONFIG_SCSI_BUSLOGIC=m +CONFIG_SCSI_INITIO=m +CONFIG_SCSI_FLASHPOINT=y +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_EATA is not set +# CONFIG_SCSI_EATA_PIO is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +CONFIG_SCSI_GDTH=m +CONFIG_SCSI_HPTIOP=m +CONFIG_SCSI_IPS=m +CONFIG_SCSI_INIA100=m +CONFIG_SCSI_PPA=m +CONFIG_SCSI_IMM=m +# CONFIG_SCSI_IZIP_EPP16 is not set +# 
CONFIG_SCSI_IZIP_SLOW_CTR is not set +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 +CONFIG_SCSI_SYM53C8XX_MMIO=y +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_DC395x=m +# CONFIG_SCSI_NSP32 is not set +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_DC390T=m +CONFIG_SCSI_QLA_FC=m +CONFIG_SCSI_QLA_ISCSI=m +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_DPT_I2O is not set +CONFIG_SCSI_LPFC=m + +# +# PCMCIA SCSI adapter support +# +CONFIG_SCSI_LOWLEVEL_PCMCIA=y +CONFIG_PCMCIA_AHA152X=m +# CONFIG_PCMCIA_FDOMAIN is not set +CONFIG_PCMCIA_NINJA_SCSI=m +CONFIG_PCMCIA_QLOGIC=m +CONFIG_PCMCIA_SYM53C500=m + + +# +# Multi-device support (RAID and LVM) +# +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_FAULTY=m +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MULTICORE_RAID456=y +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=m +CONFIG_DM_DEBUG=y +# CONFIG_DM_DELAY is not set +CONFIG_DM_MIRROR=y +CONFIG_DM_MULTIPATH=m +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_ZERO=y +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m + +# +# Fusion MPT device support +# +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_FC=m +CONFIG_FUSION_MAX_SGE=40 +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LAN=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support (JUJU alternative stack) +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +CONFIG_FIREWIRE_OHCI_DEBUG=y +# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set +# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set + +# +# Networking support +# +CONFIG_NET=y + +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m 
+CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_YEAH=m + +CONFIG_TCP_MD5SIG=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_INET_LRO=y +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_NF_SECURITY=m +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_ARPD=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL_TRAP=y +CONFIG_NET_POLL_CONTROLLER=y + +# +# IP: Virtual Server Configuration +# +CONFIG_IP_VS=m +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m + +CONFIG_IPV6=m +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_PIMSM_V2=y + +CONFIG_RDS=m +# CONFIG_RDS_DEBUG is not set +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m + +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +# 
CONFIG_NET_9P_DEBUG is not set +CONFIG_NET_9P_RDMA=m + +CONFIG_DECNET=m +CONFIG_DECNET_ROUTER=y +# CONFIG_DECNET_NF_GRABULATOR is not set +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NF_CONNTRACK=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m 
+CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_OSF=m + +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_BRIDGE_NETFILTER=y + +# +# IP: Netfilter Configuration +# + +CONFIG_NF_CT_ACCT=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_ZONES=y +# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_SCTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_PROTO_UDPLITE=m + +CONFIG_IP_NF_MATCH_ADDRTYPE=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_LOG=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_RAW=m + +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_FILTER=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_QUEUE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_HL=m + +# +# Bridge: Netfilter Configuration 
+# +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_ULOG=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_XFRM=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_USER=y +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_INET6_XFRM_MODE_BEET=m + +# +# SCTP Configuration (EXPERIMENTAL) +# +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_MSG is not set +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_HMAC_NONE is not set +CONFIG_SCTP_HMAC_SHA1=y +# CONFIG_SCTP_HMAC_MD5 is not set +CONFIG_ATM=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +CONFIG_IPX=m +# CONFIG_IPX_INTERN is not set +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=y +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +CONFIG_WAN_ROUTER=m +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2=m +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +CONFIG_IP_DCCP_CCID3=y +# CONFIG_IP_DCCP_CCID3_DEBUG is not set +CONFIG_IP_DCCP_CCID3_RTO=100 +# CONFIG_IP_DCCP_DEBUG is not set +CONFIG_NET_DCCPPROBE=m + +# +# TIPC Configuration (EXPERIMENTAL) +# +# CONFIG_TIPC is not set +# CONFIG_TIPC_ADVANCED is not set +# CONFIG_TIPC_DEBUG is not set + +CONFIG_NETLABEL=y + +# +# QoS and/or fair queueing +# +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m 
+CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_ROUTE=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_MARK=y +CONFIG_CLS_U32_PERF=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_U32=m + +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_SIMP=m + +CONFIG_DCB=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_TCPPROBE is not set +CONFIG_NET_DROP_MONITOR=y +CONFIG_NETDEVICES=y + +# disable later --kyle + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set +CONFIG_IFB=m +CONFIG_DUMMY=m +CONFIG_BONDING=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_EQUALIZER=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_NET_SB1000=m + +# +# ATM +# +CONFIG_ATM_DRIVERS=y +# CONFIG_ATM_DUMMY is not set +CONFIG_ATM_CLIP=m +CONFIG_ATM_LANE=m +CONFIG_ATM_BR2684=m +CONFIG_NET_SCH_ATM=m +CONFIG_ATM_TCP=m +# CONFIG_ATM_LANAI is not set +CONFIG_ATM_ENI=m +CONFIG_ATM_FIRESTREAM=m +# CONFIG_ATM_ZATM is not set +# CONFIG_ATM_IDT77252 is not set +# CONFIG_ATM_AMBASSADOR is not set +# CONFIG_ATM_HORIZON is not set +# CONFIG_ATM_FORE200E is not set +# CONFIG_ATM_FORE200E_USE_TASKLET is not set +CONFIG_ATM_FORE200E_TX_RETRY=16 +CONFIG_ATM_FORE200E_DEBUG=0 + +CONFIG_ATM_HE=m +CONFIG_PPPOATM=m +CONFIG_PPPOL2TP=m +CONFIG_ATM_NICSTAR=m +# CONFIG_ATM_IA is not set +# 
CONFIG_ATM_CLIP_NO_ICMP is not set +# CONFIG_ATM_MPOA is not set +# CONFIG_ATM_BR2684_IPFILTER is not set +# CONFIG_ATM_ENI_DEBUG is not set +# CONFIG_ATM_ENI_TUNE_BURST is not set +# CONFIG_ATM_ZATM_DEBUG is not set +# CONFIG_ATM_IDT77252_DEBUG is not set +# CONFIG_ATM_IDT77252_RCV_ALL is not set +# CONFIG_ATM_AMBASSADOR_DEBUG is not set +# CONFIG_ATM_HORIZON_DEBUG is not set +# CONFIG_ATM_HE_USE_SUNI is not set +# CONFIG_ATM_NICSTAR_USE_SUNI is not set +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set +# CONFIG_ATM_IA_DEBUG is not set +CONFIG_ATM_SOLOS=m + +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NETDEV_1000=y +CONFIG_NETDEV_10000=y +CONFIG_NET_ETHERNET=y +CONFIG_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_MDIO_BITBANG=m +CONFIG_NATIONAL_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_VITESSE_PHY=m +CONFIG_MICREL_PHY=m + +CONFIG_MII=m +CONFIG_HAPPYMEAL=m +CONFIG_SUNGEM=m +CONFIG_NET_VENDOR_3COM=y +CONFIG_VORTEX=m +CONFIG_TYPHOON=m +CONFIG_DNET=m + +# +# Tulip family network device support +# +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_TULIP=m +# CONFIG_TULIP_NAPI is not set +# CONFIG_TULIP_MWI is not set +CONFIG_TULIP_MMIO=y +# CONFIG_NI5010 is not set +CONFIG_DE4X5=m +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_PCMCIA_XIRCOM=m +CONFIG_ULI526X=m +# CONFIG_HP100 is not set +CONFIG_LNE390=m +CONFIG_NE3210=m +CONFIG_ES3210=m +CONFIG_NET_PCI=y +CONFIG_PCNET32=m +CONFIG_AMD8111_ETH=m +CONFIG_ADAPTEC_STARFIRE=m +CONFIG_KSZ884X_PCI=m +CONFIG_B44=m +CONFIG_B44_PCI=y +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_QLA3XXX=m +CONFIG_ATL1=m +CONFIG_ATL1C=m +CONFIG_ATL2=m +CONFIG_ATL1E=m +CONFIG_E100=m +CONFIG_FEALNX=m +CONFIG_FORCEDETH=m +CONFIG_FORCEDETH_NAPI=y +CONFIG_NATSEMI=m +CONFIG_NE2K_PCI=m +CONFIG_8139CP=m +CONFIG_8139TOO=m 
+# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_SIS900=m +CONFIG_SIS190=m +CONFIG_EPIC100=m +CONFIG_SC92031=m +CONFIG_SMSC9420=m +CONFIG_SUNDANCE=m +# CONFIG_SUNDANCE_MMIO is not set +CONFIG_TLAN=m +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y +CONFIG_VIA_VELOCITY=m +CONFIG_NET_POCKET=y +CONFIG_ATP=m +CONFIG_DE600=m +CONFIG_DE620=m +CONFIG_CASSINI=m +CONFIG_ETHOC=m +# CONFIG_KS8842 is not set +# CONFIG_KS8851_MLL is not set + +# +# Ethernet (1000 Mbit) +# +CONFIG_ACENIC=m +# CONFIG_ACENIC_OMIT_TIGON_I is not set +CONFIG_DL2K=m +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +CONFIG_NS83820=m +CONFIG_HAMACHI=m +CONFIG_YELLOWFIN=m +CONFIG_R8169=m +CONFIG_R8169_VLAN=y +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_TIGON3=m +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_JME=m + +# +# Ethernet (10000 Mbit) +# +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +CONFIG_IP1000=m +CONFIG_IXGB=m +CONFIG_IXGBEVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +CONFIG_NETXEN_NIC=m +CONFIG_NIU=m +CONFIG_S2IO=m +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set +CONFIG_TEHUTI=m +CONFIG_ENIC=m +CONFIG_MLX4_EN=m +# CONFIG_MLX4_DEBUG is not set +CONFIG_QLCNIC=m +CONFIG_QLGE=m +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_BE2NET=m + +CONFIG_FDDI=y +# CONFIG_DEFXX is not set +CONFIG_SKFP=m +# CONFIG_HIPPI is not set +CONFIG_PLIP=m +CONFIG_PPP=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_IPPP_FILTER=y +# CONFIG_PPP_BSDCOMP is not set +CONFIG_PPPOE=m +CONFIG_PPP_MPPE=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set + +# +# Wireless LAN +# +# +CONFIG_WLAN=y +# CONFIG_STRIP is not set +# CONFIG_ARLAN is not set 
+CONFIG_PCMCIA_WAVELAN=m +CONFIG_PCMCIA_NETWAVE=m +# CONFIG_PCMCIA_RAYCS is not set + +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEBUGFS=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_DEFAULT_PS=y +CONFIG_NL80211=y +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_WIRELESS_OLD_REGULATORY is not set +CONFIG_WIRELESS_EXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG is not set + +CONFIG_MAC80211=m +CONFIG_MAC80211_RC_MINSTREL=y +# CONFIG_MAC80211_RC_DEFAULT_PID is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel" +CONFIG_MAC80211_MESH=y +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_DEBUG_MENU is not set + +CONFIG_WIMAX=m +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_WIMAX_I2400M_USB=m +CONFIG_WIMAX_I2400M_SDIO=m +CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8 +CONFIG_WIMAX_IWMC3200_SDIO=y + +CONFIG_ADM8211=m +CONFIG_ATH_COMMON=m +CONFIG_ATH5K=m +CONFIG_ATH5K_DEBUG=y +CONFIG_ATH9K=m +# CONFIG_ATH9K_DEBUG is not set +CONFIG_ATH9K_DEBUGFS=y +CONFIG_AT76C50X_USB=m +CONFIG_AIRO=m +CONFIG_AIRO_CS=m +CONFIG_ATMEL=m +CONFIG_B43=m +CONFIG_B43_PCMCIA=y +CONFIG_B43_SDIO=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43_PHY_LP=y +# CONFIG_B43_FORCE_PIO is not set +CONFIG_B43LEGACY=m +# CONFIG_B43LEGACY_DEBUG is not set +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +# CONFIG_B43LEGACY_PIO_MODE is not set +CONFIG_HERMES=m +CONFIG_HERMES_CACHE_FW_ON_INIT=y +CONFIG_HOSTAP=m +CONFIG_HOSTAP_PCI=m +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_CS=m +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IPW2100_DEBUG is not set +# CONFIG_IPW2200_DEBUG is not set +# CONFIG_LIBIPW_DEBUG is not set 
+CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBERTAS_CS=m +CONFIG_LIBERTAS_SDIO=m +CONFIG_LIBERTAS_DEBUG=y +CONFIG_LIBERTAS_THINFIRM=m +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_LIBERTAS_MESH=y +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_DEBUG=y +CONFIG_IWLWIFI_DEBUGFS=y +CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT=y +CONFIG_IWLAGN=m +CONFIG_IWL4965=y +CONFIG_IWL5000=y +CONFIG_IWL3945=m +CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y +CONFIG_IWM=m +# CONFIG_IWM_DEBUG is not set +CONFIG_MAC80211_HWSIM=m +CONFIG_NORTEL_HERMES=m +CONFIG_P54_COMMON=m +CONFIG_P54_USB=m +CONFIG_P54_PCI=m +CONFIG_PCI_HERMES=m +CONFIG_PLX_HERMES=m +CONFIG_PCI_ATMEL=m +CONFIG_MWL8K=m +# CONFIG_PRISM54 is not set +CONFIG_PCMCIA_HERMES=m +CONFIG_PCMCIA_SPECTRUM=m +CONFIG_PCMCIA_ATMEL=m +CONFIG_PCMCIA_WL3501=m +CONFIG_RT2X00=m +CONFIG_RT2X00_LIB_DEBUGFS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2500USB=m +CONFIG_RT2800USB=m +# CONFIG_RT2800USB_RT30XX is not set +# CONFIG_RT2800USB_RT35XX is not set +# CONFIG_RT2800USB_UNKNOWN is not set +CONFIG_RT2800PCI=m +# CONFIG_RT2800PCI_RT30XX is not set +# CONFIG_RT2800PCI_RT35XX is not set +CONFIG_RT73USB=m +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_TMD_HERMES=m +CONFIG_USB_ZD1201=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_AR9170_USB=m + +CONFIG_WL12XX=y +CONFIG_WL1251=m +CONFIG_WL1251_SPI=m +CONFIG_WL1251_SDIO=m +CONFIG_WL1271=m + +# +# Token Ring devices +# +# CONFIG_TR is not set +# CONFIG_IBMOL is not set +# CONFIG_3C359 is not set +# Broken with gcc4.1 +# CONFIG_TMS380TR is not set +# CONFIG_TMSPCI is not set +# CONFIG_ABYSS is not set +# CONFIG_IBMLS is not set +# CONFIG_PCMCIA_IBMTR is not set + + +CONFIG_NET_FC=y + +# +# Wan interfaces +# +# CONFIG_WAN is not set + +# +# PCMCIA network device support +# +CONFIG_NET_PCMCIA=y +CONFIG_PCMCIA_3C589=m +CONFIG_PCMCIA_3C574=m +CONFIG_PCMCIA_FMVJ18X=m +CONFIG_PCMCIA_PCNET=m 
+CONFIG_PCMCIA_NMCLAN=m +CONFIG_PCMCIA_SMC91C92=m +CONFIG_PCMCIA_XIRC2PS=m +CONFIG_PCMCIA_AXNET=m + +# +# Amateur Radio support +# +CONFIG_HAMRADIO=y +CONFIG_AX25=m +CONFIG_AX25_DAMA_SLAVE=y +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +CONFIG_CAN_SJA1000=m +CONFIG_CAN_SJA1000_ISA=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_NETROM=m +CONFIG_ROSE=m +CONFIG_MKISS=m +CONFIG_6PACK=m +CONFIG_BPQETHER=m +CONFIG_SCC=m +CONFIG_DMASCC=m +# CONFIG_SCC_DELAY is not set +CONFIG_SCC_TRXECHO=y +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +CONFIG_BAYCOM_PAR=m +CONFIG_BAYCOM_EPP=m +CONFIG_YAM=m + +# +# IrDA (infrared) support +# +CONFIG_IRDA=m +# CONFIG_IRDA_DEBUG is not set +CONFIG_IRLAN=m +CONFIG_IRNET=m +CONFIG_IRCOMM=m +# CONFIG_IRDA_ULTRA is not set +CONFIG_IRDA_CACHE_LAST_LSAP=y +CONFIG_IRDA_FAST_RR=y +CONFIG_IRTTY_SIR=m +CONFIG_DONGLE=y +CONFIG_ACTISYS_DONGLE=m +CONFIG_ACT200L_DONGLE=m +CONFIG_ESI_DONGLE=m +CONFIG_GIRBIL_DONGLE=m +CONFIG_KINGSUN_DONGLE=m +CONFIG_KSDAZZLE_DONGLE=m +CONFIG_KS959_DONGLE=m +CONFIG_LITELINK_DONGLE=m +CONFIG_MA600_DONGLE=m +CONFIG_MCP2120_DONGLE=m +CONFIG_OLD_BELKIN_DONGLE=m +CONFIG_TEKRAM_DONGLE=m +CONFIG_TOIM3232_DONGLE=m + +CONFIG_ALI_FIR=m +CONFIG_MCS_FIR=m +CONFIG_NSC_FIR=m +CONFIG_SIGMATEL_FIR=m +CONFIG_SMC_IRCC_FIR=m +# CONFIG_TOSHIBA_FIR is not set +CONFIG_USB_IRDA=m +CONFIG_VLSI_FIR=m +CONFIG_VIA_FIR=m +CONFIG_WINBOND_FIR=m + +# +# Bluetooth support +# +CONFIG_BT=m +CONFIG_BT_L2CAP=m +CONFIG_BT_SCO=m +CONFIG_BT_CMTP=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m + +# +# Bluetooth device drivers +# +CONFIG_BT_HCIBTUSB=m +# Disable the BT_HCIUSB driver. +# It sucks more power than BT_HCIBTUSB which has the same functionality. 
+CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIBTUART=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m + +# +# ISDN subsystem +# +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_ISDN_I4L=m +CONFIG_ISDN_DRV_AVMB1_B1PCI=m +CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m +CONFIG_ISDN_DRV_AVMB1_T1PCI=m +CONFIG_ISDN_DRV_AVMB1_C4=m + +CONFIG_MISDN_HFCUSB=m + +CONFIG_ISDN_PPP=y +CONFIG_ISDN_PPP_VJ=y +CONFIG_ISDN_MPP=y +# CONFIG_ISDN_PPP_BSDCOMP is not set +CONFIG_ISDN_TTY_FAX=y +CONFIG_DE_AOC=y + +CONFIG_ISDN_AUDIO=y + +CONFIG_ISDN_DRV_HISAX=m +CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y +CONFIG_ISDN_DRV_AVMB1_AVM_CS=m + +CONFIG_ISDN_CAPI_CAPIDRV=m +CONFIG_ISDN_DIVERSION=m + +CONFIG_HISAX_EURO=y +CONFIG_HISAX_1TR6=y +CONFIG_HISAX_NI1=y +CONFIG_HISAX_MAX_CARDS=8 +CONFIG_HISAX_16_3=y +CONFIG_HISAX_TELESPCI=y +CONFIG_HISAX_S0BOX=y +CONFIG_HISAX_FRITZPCI=y +CONFIG_HISAX_AVM_A1_PCMCIA=y +CONFIG_HISAX_ELSA=y +CONFIG_HISAX_DIEHLDIVA=y +CONFIG_HISAX_SEDLBAUER=y +CONFIG_HISAX_NETJET=y +CONFIG_HISAX_NETJET_U=y +CONFIG_HISAX_NICCY=y +CONFIG_HISAX_BKM_A4T=y +CONFIG_HISAX_SCT_QUADRO=y +CONFIG_HISAX_GAZEL=y +CONFIG_HISAX_HFC_PCI=y +CONFIG_HISAX_W6692=y +CONFIG_HISAX_HFC_SX=y +CONFIG_HISAX_ENTERNOW_PCI=y +# CONFIG_HISAX_DEBUG is not set +CONFIG_HISAX_AVM_A1_CS=m +CONFIG_HISAX_ST5481=m +# CONFIG_HISAX_HFCUSB is not set +CONFIG_HISAX_FRITZ_PCIPNP=m +CONFIG_HISAX_NO_SENDCOMPLETE=y +CONFIG_HISAX_NO_LLC=y +CONFIG_HISAX_NO_KEYPAD=y +CONFIG_HISAX_SEDLBAUER_CS=m +CONFIG_HISAX_ELSA_CS=m +CONFIG_HISAX_TELES_CS=m +CONFIG_HISAX_HFC4S8S=m + 
+CONFIG_ISDN_DRV_LOOP=m +CONFIG_HYSDN=m +CONFIG_HYSDN_CAPI=y + + +# +# CAPI subsystem +# +CONFIG_ISDN_CAPI=m +# CONFIG_CAPI_TRACE is not set +CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_ISDN_CAPI_CAPI20=m +CONFIG_ISDN_CAPI_CAPIFS_BOOL=y +CONFIG_ISDN_CAPI_CAPIFS=m + +# +# CAPI hardware drivers +# + +# +# Active AVM cards +# +CONFIG_CAPI_AVM=y + +# +# Active Eicon DIVA Server cards +# +# CONFIG_CAPI_EICON is not set +CONFIG_ISDN_DIVAS=m +CONFIG_ISDN_DIVAS_BRIPCI=y +CONFIG_ISDN_DIVAS_PRIPCI=y +CONFIG_ISDN_DIVAS_DIVACAPI=m +CONFIG_ISDN_DIVAS_USERIDI=m +CONFIG_ISDN_DIVAS_MAINT=m + +CONFIG_ISDN_DRV_GIGASET=m +CONFIG_GIGASET_CAPI=y +CONFIG_GIGASET_BASE=m +CONFIG_GIGASET_M101=m +CONFIG_GIGASET_M105=m +# CONFIG_GIGASET_DEBUG is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=m + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_GTCO=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_USB_WACOM=m + +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m + +# +# Input I/O drivers +# +CONFIG_GAMEPORT=m +CONFIG_GAMEPORT_NS558=m +CONFIG_GAMEPORT_L4=m +CONFIG_GAMEPORT_EMU10K1=m +CONFIG_GAMEPORT_FM801=m +CONFIG_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m + +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_MATRIX is not set +# 
CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_LM8323 is not set +CONFIG_QT2160=m +CONFIG_KEYBOARD_ADP5588=m +CONFIG_KEYBOARD_MAX7359=m +CONFIG_KEYBOARD_OPENCORES=m +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_ANALOG=m +CONFIG_JOYSTICK_A3D=m +CONFIG_JOYSTICK_ADI=m +CONFIG_JOYSTICK_COBRA=m +CONFIG_JOYSTICK_GF2K=m +CONFIG_JOYSTICK_GRIP=m +CONFIG_JOYSTICK_GRIP_MP=m +CONFIG_JOYSTICK_GUILLEMOT=m +CONFIG_JOYSTICK_INTERACT=m +CONFIG_JOYSTICK_SIDEWINDER=m +CONFIG_JOYSTICK_TMDC=m +CONFIG_JOYSTICK_IFORCE=m +CONFIG_JOYSTICK_IFORCE_USB=y +CONFIG_JOYSTICK_IFORCE_232=y +CONFIG_JOYSTICK_WARRIOR=m +CONFIG_JOYSTICK_MAGELLAN=m +CONFIG_JOYSTICK_SPACEORB=m +CONFIG_JOYSTICK_SPACEBALL=m +CONFIG_JOYSTICK_STINGER=m +CONFIG_JOYSTICK_DB9=m +CONFIG_JOYSTICK_GAMECON=m +CONFIG_JOYSTICK_TURBOGRAFX=m +CONFIG_JOYSTICK_JOYDUMP=m +CONFIG_JOYSTICK_TWIDJOY=m +CONFIG_JOYSTICK_WALKERA0701=m +CONFIG_JOYSTICK_XPAD=m +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_JOYSTICK_ZHENHUA=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_GUNZE=m +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_FUJITSU=m +CONFIG_TOUCHSCREEN_HTCPEN=m +CONFIG_TOUCHSCREEN_INEXIO=m +CONFIG_TOUCHSCREEN_MTOUCH=m +CONFIG_TOUCHSCREEN_MK712=m +CONFIG_TOUCHSCREEN_PENMOUNT=m +CONFIG_TOUCHSCREEN_TSC2007=m +CONFIG_TOUCHSCREEN_AD7879_I2C=m +CONFIG_TOUCHSCREEN_TOUCHIT213=m +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m +CONFIG_TOUCHSCREEN_TOUCHWIN=m +CONFIG_TOUCHSCREEN_UCB1400=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_USB_E2I=y +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m +CONFIG_TOUCHSCREEN_DYNAPRO=m +# CONFIG_TOUCHSCREEN_WM97XX is not set +CONFIG_TOUCHSCREEN_EETI=m +CONFIG_TOUCHSCREEN_W90X900=m 
+CONFIG_TOUCHSCREEN_MCS5000=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_PCSPKR=m +CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_WISTRON_BTNS=m +CONFIG_INPUT_ATLAS_BTNS=m + +CONFIG_INPUT_ATI_REMOTE=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_IMON=m + +CONFIG_MAC_EMUMOUSEBTN=y + +CONFIG_INPUT_WM831X_ON=m + +CONFIG_INPUT_APPLEIR=m + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_ROCKETPORT=m +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_N_HDLC=m +# CONFIG_STALDRV is not set +# CONFIG_IBM_ASM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +CONFIG_TCG_TPM=m +CONFIG_TCG_TIS=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +CONFIG_TELCLOCK=m + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_CS=m +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +# CONFIG_COMPUTONE is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_DIGIEPCA is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +# CONFIG_ISI is not set +# CONFIG_RISCOM8 is not set +# CONFIG_SPECIALIX is not set +# CONFIG_SX is not set +# CONFIG_RIO is not set +# CONFIG_STALLION is not set +# CONFIG_ISTALLION is not set +CONFIG_SERIAL_JSM=m + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_TIMBERDALE is not set +CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_PRINTER=m +CONFIG_LP_CONSOLE=y +CONFIG_PPDEV=m + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m + +# +# I2C Algorithms +# +# CONFIG_I2C_DEBUG_ALGO is not set +CONFIG_I2C_HELPER_AUTO=y 
+CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCF=m + +# +# I2C Hardware Bus support +# + +CONFIG_I2C_ALGOPCA=m +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD756_S4882 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +# CONFIG_I2C_ELEKTOR is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_NFORCE2_S4985 is not set + +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_93CX6=m +CONFIG_EEPROM_MAX6875=m + +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PARPORT=m +CONFIG_I2C_PARPORT_LIGHT=m +CONFIG_I2C_PASEMI=m +CONFIG_I2C_PCA_ISA=m +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_PIIX4 is not set +# CONFIG_SCx200_ACB is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_STUB=m +CONFIG_I2C_TINY_USB=m +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_XILINX is not set + +# +# I2C Hardware Sensors Chip support +# +CONFIG_SENSORS_ATK0110=m +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7473=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_DS1621=m +# CONFIG_DS1682 is not set +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_FSCHMD=m +CONFIG_SENSORS_G760A=m 
+CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_HDAPS=m +# CONFIG_SENSORS_I5K_AMB is not set +# FIXME: IBMAEM x86 only? +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_LIS3LV02D=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_PCF8591=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_WM8350=m +CONFIG_SENSORS_WM831X=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ASC7621=m + +CONFIG_W1=m +CONFIG_W1_CON=y +# This is busted. +# If we enable it, it steals Matrox cards, and the +# framebuffer drivers stop working. 
+# CONFIG_W1_MASTER_MATROX is not set +CONFIG_W1_MASTER_DS2482=m +CONFIG_W1_MASTER_DS2490=m +CONFIG_W1_MASTER_DS1WM=m +CONFIG_W1_SLAVE_THERM=m +CONFIG_W1_SLAVE_SMEM=m +CONFIG_W1_SLAVE_DS2431=m +CONFIG_W1_SLAVE_DS2433=m +CONFIG_W1_SLAVE_DS2433_CRC=y +CONFIG_W1_SLAVE_DS2760=m +# +# Mice +# + +# +# IPMI +# +CONFIG_IPMI_HANDLER=m +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_POWEROFF=m + +# +# Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_SOFT_WATCHDOG=m +CONFIG_WDTPCI=m +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_EUROTECH_WDT is not set +# CONFIG_IB700_WDT is not set +# CONFIG_MIXCOMWD is not set +# CONFIG_SCx200_WDT is not set +# CONFIG_60XX_WDT is not set +CONFIG_W83877F_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_MACHZ_WDT=m +# CONFIG_SC520_WDT is not set +CONFIG_ALIM7101_WDT=m +CONFIG_ALIM1535_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +# CONFIG_WAFER_WDT is not set +# CONFIG_CPU5_WDT is not set +CONFIG_I6300ESB_WDT=m +CONFIG_IT8712F_WDT=m +# CONFIG_SBC8360_WDT is not set +# CONFIG_SBC7240_WDT is not set +CONFIG_SMSC_SCH311X_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_USBPCWATCHDOG=m +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +CONFIG_WM8350_WATCHDOG=m +CONFIG_WM831X_WATCHDOG=m +# CONFIG_MAX63XX_WATCHDOG is not set + +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_RTC_DEBUG is not set +# CONFIG_GEN_RTC is not set +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_EP93XX 
is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_RS5C372=m +# CONFIG_RTC_DRV_SA1100 is not set +# CONFIG_RTC_DRV_TEST is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_V3020=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_WM8350=m +# CONFIG_RTC_DRV_AB3100 is not set +CONFIG_RTC_DRV_WM831X=m +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m + +CONFIG_DTLK=m +CONFIG_R3964=m +# CONFIG_APPLICOM is not set +# CONFIG_SONYPI is not set + +# +# Ftape, the floppy tape device driver +# +CONFIG_AGP=y +CONFIG_AGP_ALI=y +CONFIG_AGP_ATI=y +CONFIG_AGP_AMD=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_AGP_NVIDIA=y +CONFIG_AGP_SIS=y +CONFIG_AGP_SWORKS=y +CONFIG_AGP_VIA=y +CONFIG_AGP_EFFICEON=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_DRM=m +CONFIG_DRM_TDFX=m +CONFIG_DRM_R128=m +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_KMS=y +CONFIG_DRM_I810=m +# CONFIG_DRM_I830 is not set +CONFIG_DRM_MGA=m +CONFIG_DRM_SIS=m +CONFIG_DRM_SAVAGE=m +CONFIG_DRM_I915=m +CONFIG_DRM_I915_KMS=y +CONFIG_DRM_VIA=m +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_NOUVEAU_DEBUG=y +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_VMWGFX=m + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set + +CONFIG_CARDMAN_4000=m +CONFIG_CARDMAN_4040=m + +CONFIG_MWAVE=m +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HANGCHECK_TIMER=m + +# +# Multimedia devices +# +CONFIG_MEDIA_SUPPORT=m +CONFIG_VIDEO_DEV=m +# CONFIG_VIDEO_ADV_DEBUG is not set +CONFIG_VIDEO_HELPER_CHIPS_AUTO=y +CONFIG_VIDEO_ALLOW_V4L1=y +CONFIG_VIDEO_V4L1_COMPAT=y +CONFIG_VIDEO_V4L2=y +# CONFIG_VIDEO_VIVI is not set + 
+# +# Video For Linux +# + +# +# Video Adapters +# +CONFIG_V4L_USB_DRIVERS=y +CONFIG_VIDEO_CAPTURE_DRIVERS=y +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_BT848=m +CONFIG_VIDEO_BT848_DVB=y +CONFIG_VIDEO_BWQCAM=m +# CONFIG_VIDEO_CAFE_CCIC is not set +# CONFIG_VIDEO_CPIA is not set +CONFIG_VIDEO_CPIA2=m +CONFIG_VIDEO_CQCAM=m +CONFIG_VIDEO_CX23885=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX18_ALSA=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_DVB=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_VP3054=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_CX231XX=m +CONFIG_VIDEO_CX231XX_ALSA=m +CONFIG_VIDEO_CX231XX_DVB=m +CONFIG_VIDEO_HEXIUM_ORION=m +CONFIG_VIDEO_HEXIUM_GEMINI=m +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_MEYE=m +CONFIG_VIDEO_MXB=m +# CONFIG_VIDEO_OVCAMCHIP is not set +CONFIG_VIDEO_PVRUSB2_DVB=y +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_SAA5246A=m +CONFIG_VIDEO_SAA5249=m +CONFIG_VIDEO_SAA6588=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_STRADIS=m +CONFIG_VIDEO_USBVISION=m +CONFIG_VIDEO_W9966=m +CONFIG_VIDEO_ZORAN=m +CONFIG_VIDEO_ZORAN_AVS6EYES=m +CONFIG_VIDEO_ZORAN_BUZ=m +CONFIG_VIDEO_ZORAN_DC10=m +CONFIG_VIDEO_ZORAN_DC30=m +CONFIG_VIDEO_ZORAN_LML33=m +CONFIG_VIDEO_ZORAN_LML33R10=m +CONFIG_VIDEO_ZORAN_ZR36060=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_SAA7164=m +CONFIG_VIDEO_TLG2300=m + +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y + +# +# Radio Adapters +# +CONFIG_RADIO_GEMTEK_PCI=m +CONFIG_RADIO_MAXIRADIO=m +CONFIG_RADIO_MAESTRO=m + +CONFIG_MEDIA_ATTACH=y +CONFIG_MEDIA_TUNER_CUSTOMISE=y +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_TUNER_MXL5005S=m 
+CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MAX2165=m + +# +# Digital Video Broadcasting Devices +# +CONFIG_DVB_CAPTURE_DRIVERS=y +CONFIG_DVB_CORE=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y + +CONFIG_DVB_FE_CUSTOMISE=y +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_MT312=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_SP8870=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_L64781=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_MT352=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_VES1820=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_STV0297=m +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_AU8522=m +CONFIG_DVB_S5H1411=m +CONFIG_DVB_DIB8000=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_TDA665x=m + +# +# Supported Frontend Modules +# +CONFIG_DVB_BT8XX=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_PLUTO2=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_USB_DRV=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_AF9015=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_FRIIO=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_DM1105=m +CONFIG_DVB_DRX397XD=m +CONFIG_DVB_LGDT3304=m +CONFIG_DVB_S921=m 
+CONFIG_DVB_ISL6405=m +CONFIG_DVB_LGS8GL5=m +CONFIG_DVB_DUMMY_FE=m +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_NGENE=m + +# +# Supported SAA7146 based PCI Adapters +# +CONFIG_DVB_AV7110=m +CONFIG_DVB_AV7110_OSD=y +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_DVB_BUDGET_PATCH=m + +# +# Supported USB Adapters +# +CONFIG_DVB_TTUSB_BUDGET=m + +# +# Supported FlexCopII (B2C2) Adapters +# +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set +CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_DIBUSB_MB=m +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_DVB_USB_AZ6027=m + +CONFIG_DVB_PT1=m + +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m + +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_VIDEO_TUNER=m +CONFIG_VIDEO_BTCX=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set + +# +# Broadcom Crystal HD video decoder driver +# +CONFIG_CRYSTALHD=m + +# +# Graphics support +# + +CONFIG_DISPLAY_SUPPORT=m +CONFIG_VIDEO_OUTPUT_CONTROL=m + +CONFIG_FB=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_3DFX=m +CONFIG_FB_3DFX_ACCEL=y +CONFIG_FB_3DFX_I2C=y +# CONFIG_FB_ARC is not set +# CONFIG_FB_ARK is not set +CONFIG_FB_ATY128=m +CONFIG_FB_ATY=m +CONFIG_FB_ATY_CT=y +CONFIG_FB_ATY_GX=y +CONFIG_FB_ATY_GENERIC_LCD=y +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_CARMINE is not set 
+CONFIG_FB_CIRRUS=m +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_GEODE is not set +# CONFIG_FB_HECUBA is not set +# CONFIG_FB_HGA is not set +CONFIG_FB_I810=m +CONFIG_FB_I810_GTF=y +CONFIG_FB_I810_I2C=y +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_INTEL is not set +# CONFIG_FB_INTEL_DEBUG is not set +# CONFIG_FB_INTEL_I2C is not set +CONFIG_FB_KYRO=m +# CONFIG_FB_LE80578 is not set +CONFIG_FB_MATROX=m +CONFIG_FB_MATROX_MILLENIUM=y +CONFIG_FB_MATROX_MYSTIQUE=y +CONFIG_FB_MATROX_G=y +CONFIG_FB_MATROX_I2C=m +CONFIG_FB_MATROX_MAVEN=m +CONFIG_FB_NEOMAGIC=m +CONFIG_FB_NVIDIA=m +# CONFIG_FB_NVIDIA_DEBUG is not set +CONFIG_FB_NVIDIA_I2C=y +# CONFIG_FB_PM2 is not set +# CONFIG_FB_PM2_FIFO_DISCONNECT is not set +# CONFIG_FB_PM3 is not set +CONFIG_FB_RADEON=m +# CONFIG_FB_RADEON_DEBUG is not set +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RIVA=m +# CONFIG_FB_RIVA_DEBUG is not set +# CONFIG_FB_RIVA_I2C is not set +# CONFIG_FB_S1D13XXX is not set +CONFIG_FB_S3=m +CONFIG_FB_SAVAGE=m +CONFIG_FB_SAVAGE_I2C=y +CONFIG_FB_SAVAGE_ACCEL=y +# CONFIG_FB_SIS is not set +CONFIG_FB_SIS_300=y +CONFIG_FB_SIS_315=y +CONFIG_FB_SM501=m +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_TRIDENT=m +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_VGA16=m +CONFIG_FB_VIRTUAL=m +CONFIG_FB_VOODOO1=m +# CONFIG_FB_VT8623 is not set +CONFIG_FB_EFI=y +CONFIG_FB_VIA=m +CONFIG_FB_METRONOME=m +CONFIG_FB_MB862XX=m +CONFIG_FB_MB862XX_PCI_GDC=y +CONFIG_FB_MB862XX_LIME=y +# CONFIG_FB_PRE_INIT_FB is not set +# CONFIG_FB_TMIO is not set +# CONFIG_FB_BROADSHEET is not set + +# CONFIG_FIRMWARE_EDID is not set + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FONTS is not set + +# +# Logo configuration +# +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# 
CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y + +# +# Sound +# +CONFIG_SOUND=m + +# +# Advanced Linux Sound Architecture +# +CONFIG_SND=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +# CONFIG_SND_DEBUG_VERBOSE is not set +CONFIG_SND_VERBOSE_PROCFS=y +CONFIG_SND_SEQUENCER=y +CONFIG_SND_HRTIMER=y +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=y +CONFIG_SND_PCM_OSS=y +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_RTCTIMER=y +CONFIG_SND_DYNAMIC_MINORS=y +# CONFIG_SND_SUPPORT_OLD_API is not set + +# +# Generic devices +# +CONFIG_SND_DUMMY=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MTS64=m +CONFIG_SND_SERIAL_U16550=m +CONFIG_SND_MPU401=m +CONFIG_SND_PORTMAN2X4=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 + +CONFIG_SND_DRIVERS=y + +# +# ISA devices +# +CONFIG_SND_AD1889=m +# CONFIG_SND_WAVEFRONT is not set +# CONFIG_SND_MSND_PINNACLE is not set +# CONFIG_SND_MSND_CLASSIC is not set + +# +# PCI devices +# +CONFIG_SND_PCI=y +CONFIG_SND_ADLIB=m +CONFIG_SND_ALI5451=m +CONFIG_SND_ALS300=m +CONFIG_SND_ALS4000=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +# CONFIG_SND_AW2 is not set +CONFIG_SND_AZT3328=m +CONFIG_SND_BT87X=m +# CONFIG_SND_BT87X_OVERCLOCK is not set +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y +CONFIG_SND_CS4281=m +CONFIG_SND_CS5530=m +CONFIG_SND_CS5535AUDIO=m +CONFIG_SND_EMU10K1=m +CONFIG_SND_EMU10K1X=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_ES1938=m +CONFIG_SND_ES1968=m +CONFIG_SND_FM801=m +CONFIG_SND_FM801_TEA575X_BOOL=y +CONFIG_SND_CTXFI=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_HDA_INTEL=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_INPUT_JACK=y +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_CODEC_REALTEK=y 
+CONFIG_SND_HDA_CODEC_CA0110=y +CONFIG_SND_HDA_CODEC_ANALOG=y +CONFIG_SND_HDA_CODEC_SIGMATEL=y +CONFIG_SND_HDA_CODEC_VIA=y +CONFIG_SND_HDA_CODEC_ATIHDMI=y +CONFIG_SND_HDA_CODEC_CIRRUS=y +CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_HDA_CODEC_CMEDIA=y +CONFIG_SND_HDA_CODEC_INTELHDMI=y +CONFIG_SND_HDA_CODEC_SI3054=y +CONFIG_SND_HDA_CODEC_NVHDMI=y +CONFIG_SND_HDA_GENERIC=y +CONFIG_SND_HDA_POWER_SAVE=y +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDSPM=m +CONFIG_SND_HIFIER=m +CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=y +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MIRO=m +CONFIG_SND_MIXART=m +CONFIG_SND_NM256=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_RME32=m +CONFIG_SND_PCSP=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RIPTIDE=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_SC6000=m +CONFIG_SND_SIS7019=m +CONFIG_SND_SONICVIBES=m +CONFIG_SND_HDSP=m +CONFIG_SND_TRIDENT=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_YMFPCI=m + +# +# ALSA USB devices +# +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_USX2Y=m +CONFIG_SND_USB_US122L=m +CONFIG_SND_USB_UA101=m + +# +# PCMCIA devices +# +CONFIG_SND_PCMCIA=y +CONFIG_SND_VXPOCKET=m +CONFIG_SND_PDAUDIOCF=m + +# +# Open Sound System +# +# CONFIG_SOUND_PRIME is not set + +# +# USB support +# +CONFIG_USB=y +CONFIG_USB_SUPPORT=y +# CONFIG_USB_DEBUG is not set + +# DEPRECATED: See bug 362221. Fix udev. 
+# CONFIG_USB_DEVICE_CLASS is not set + + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +# CONFIG_USB_DYNAMIC_MINORS is not set +CONFIG_USB_SUSPEND=y + +# +# USB Host Controller Drivers +# +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_OHCI_HCD=y +# CONFIG_USB_OHCI_HCD_SSB is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_CS is not set +# CONFIG_USB_R8A66597_HCD is not set +CONFIG_USB_XHCI_HCD=m +# CONFIG_USB_XHCI_HCD_DEBUGGING is not set +CONFIG_USB_ISP1362_HCD=m + +# +# USB Device Class drivers +# + +# +# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m +# CONFIG_BLK_DEV_UB is not set +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_KARMA=y +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Human Interface Devices (HID) +# +CONFIG_USB_HID=y + +CONFIG_HID_SUPPORT=y + +CONFIG_HID=m +# debugging default is y upstream now +CONFIG_HIDRAW=y +CONFIG_HID_PID=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_PANTHERLORD_FF=y +CONFIG_THRUSTMASTER_FF=y +CONFIG_HID_WACOM=y +CONFIG_ZEROPLUS_FF=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_IDMOUSE=m +CONFIG_DRAGONRISE_FF=y +CONFIG_GREENASIA_FF=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_3M_PCT=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MOSART=y +CONFIG_HID_NTRIG=y +CONFIG_HID_QUANTA=y +CONFIG_HID_STANTUM=y + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m + +# +# USB Multimedia devices +# +CONFIG_DAB=y +CONFIG_USB_DABUSB=m + +CONFIG_USB_VICAM=m +CONFIG_USB_DSBR=m +# CONFIG_USB_ET61X251 is not set 
+CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SN9C20X_EVDEV=y +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GL860=m +CONFIG_USB_GSPCA_JEILINJ=m + +CONFIG_USB_IBMCAM=m +CONFIG_USB_KONICAWC=m +# CONFIG_USB_OV511 is not set +CONFIG_USB_S2255=m +CONFIG_USB_SE401=m +# CONFIG_VIDEO_SH_MOBILE_CEU is not set +# CONFIG_USB_STV680 is not set +# CONFIG_USB_SN9C102 is not set +CONFIG_USB_ZR364XX=m +CONFIG_SOC_CAMERA=m +CONFIG_SOC_CAMERA_MT9M001=m +CONFIG_SOC_CAMERA_MT9V022=m +CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_SOC_CAMERA_MT9M111=m +CONFIG_SOC_CAMERA_MT9T031=m +CONFIG_SOC_CAMERA_TW9910=m +CONFIG_SOC_CAMERA_OV772X=m +CONFIG_SOC_CAMERA_MT9T112=m +CONFIG_SOC_CAMERA_RJ54N1=m +CONFIG_SOC_CAMERA_OV9640=m + +# +# USB Network adaptors +# +CONFIG_USB_CATC=m +CONFIG_USB_HSO=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_USBNET=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET=m 
+CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_CDC_PHONET=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m + +# +# USB Host-to-Host Cables +# +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y + +# +# Intelligent USB Devices/Gadgets +# +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y + +# CONFIG_USB_MUSB_HDRC is not set + +# +# USB port drivers +# +CONFIG_USB_USS720=m + +# +# USB Serial Converter support +# +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_FUNSOFT=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_HP4X=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KEYSPAN_MPR=y +CONFIG_USB_SERIAL_KEYSPAN_USA28=y +CONFIG_USB_SERIAL_KEYSPAN_USA28X=y +CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y +CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y +CONFIG_USB_SERIAL_KEYSPAN_USA19=y +CONFIG_USB_SERIAL_KEYSPAN_USA18X=y +CONFIG_USB_SERIAL_KEYSPAN_USA19W=y +CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y +CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y +CONFIG_USB_SERIAL_KEYSPAN_USA49W=y +CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MOTOROLA=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_OPTION=y +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_SAFE=m 
+CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SIEMENS_MPI=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m +CONFIG_USB_SERIAL_DEBUG=m + +CONFIG_USB_EZUSB=y +CONFIG_USB_EMI62=m +CONFIG_USB_LED=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +CONFIG_USB_G_SERIAL=m + +# +# USB Miscellaneous drivers +# + +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_ATM=m +CONFIG_USB_BERRY_CHARGE=m +CONFIG_USB_CXACRU=m +# CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_EMI26=m +CONFIG_USB_ETH=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_FILE_STORAGE=m +# CONFIG_USB_FILE_STORAGE_TEST is not set +# CONFIG_USB_GADGET is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_VST=m +CONFIG_USB_LCD=m +CONFIG_USB_LD=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_MON=y +CONFIG_USB_PWC=m +CONFIG_USB_PWC_INPUT_EVDEV=y +# CONFIG_USB_PWC_DEBUG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_QUICKCAM_MESSENGER is not set +CONFIG_USB_SL811_HCD=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +CONFIG_RADIO_SI470X=y +CONFIG_USB_SI470X=m +CONFIG_I2C_SI470X=m +CONFIG_RADIO_SI4713=m +# CONFIG_RADIO_TEF6862 is not set +CONFIG_USB_MR800=m +CONFIG_USB_STKWEBCAM=m +# CONFIG_USB_TEST is not set +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_U132_HCD=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m +# CONFIG_USB_ZC0301 is not set +CONFIG_USB_ZERO=m + +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Sonics Silicon Backplane +# +CONFIG_SSB=m +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_SDIOHOST=y +CONFIG_SSB_PCMCIAHOST=y +# CONFIG_SSB_SILENT is not set +# CONFIG_SSB_DEBUG 
is not set +CONFIG_SSB_DRIVER_PCICORE=y + +# Multifunction USB devices +# CONFIG_MFD_PCF50633 is not set +CONFIG_PCF50633_ADC=m +CONFIG_PCF50633_GPIO=m +# CONFIG_AB3100_CORE is not set +CONFIG_INPUT_PCF50633_PMU=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_CHARGER_PCF50633=m +CONFIG_RTC_DRV_PCF50633=m + +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_TC6393XB is not set +CONFIG_MFD_WM8400=m +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8350 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_AB3100_OTP is not set +# CONFIG_MFD_TIMBERDALE is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_LPC_SCH is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_MFD_MAX8925 is not set + +# +# File systems +# +CONFIG_MISC_FILESYSTEMS=y + +CONFIG_EXT2_FS=m +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT2_FS_XIP=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_DEFAULTS_TO_ORDERED=y +CONFIG_EXT3_FS_XATTR=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_XATTR=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_JBD2=y +CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=m +# CONFIG_XFS_DEBUG is not set +# CONFIG_XFS_RT is not set +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_DNOTIFY=y +# Autofsv3 is obsolete. 
+# CONFIG_AUTOFS_FS is not set +CONFIG_AUTOFS4_FS=m +CONFIG_EXOFS_FS=m +# CONFIG_EXOFS_DEBUG is not set +CONFIG_NILFS2_FS=m +CONFIG_LOGFS=m +CONFIG_CEPH_FS=m + +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_FSCACHE_OBJECT_LIST=y + +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_DEBUG_FS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +CONFIG_AFFS_FS=m +CONFIG_ECRYPT_FS=m +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +# CONFIG_BEFS_DEBUG is not set +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +CONFIG_JFFS2_SUMMARY=y +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_VXFS_FS=m +# CONFIG_HPFS_FS is not set +CONFIG_QNX4FS_FS=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_FUSE_FS=m +CONFIG_OMFS_FS=m +CONFIG_CUSE=m + +# +# Network File Systems +# +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFS_FSCACHE=y +CONFIG_LOCKD=m 
+CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=m +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_SPKM3=m +# CONFIG_SMB_FS is not set +# CONFIG_SMB_NLS_DEFAULT is not set +CONFIG_CIFS=m +CONFIG_CIFS_STATS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_EXPERIMENTAL=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_WEAK_PW_HASH=y +# CONFIG_CIFS_DEBUG2 is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_NCP_FS=m +CONFIG_NCPFS_PACKET_SIGNING=y +CONFIG_NCPFS_IOCTL_LOCKING=y +CONFIG_NCPFS_STRONG=y +CONFIG_NCPFS_NFS_NS=y +CONFIG_NCPFS_OS2_NS=y +CONFIG_NCPFS_SMALLDOS=y +CONFIG_NCPFS_NLS=y +CONFIG_NCPFS_EXTRAS=y +CONFIG_CODA_FS=m +# CONFIG_AFS_FS is not set +# CONFIG_AF_RXRPC is not set + +CONFIG_OCFS2_FS=m +# CONFIG_OCFS2_DEBUG_FS is not set +# CONFIG_OCFS2_DEBUG_MASKLOG is not set +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +# CONFIG_OCFS2_FS_STATS is not set + +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y + +CONFIG_CONFIGFS_FS=m + +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y + +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_XATTR=y +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +# CONFIG_UBIFS_FS_DEBUG is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_BSD_DISKLABEL=y +CONFIG_EFI_PARTITION=y +CONFIG_KARMA_PARTITION=y +# CONFIG_LDM_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_OSF_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_SUN_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_ULTRIX_PARTITION is not set + +CONFIG_NLS=y + +# +# Native Language Support +# +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m 
+CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_ASCII=y + +# +# Profiling support +# +CONFIG_PROFILING=y +CONFIG_OPROFILE=m +CONFIG_OPROFILE_EVENT_MULTIPLEX=y + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_INFO=y +CONFIG_FRAME_POINTER=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +# CONFIG_DEBUG_DRIVER is not set +CONFIG_HEADERS_CHECK=y +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_LKDTM is not set + +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_LOCKDEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set + +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set + +# +# Security options +# +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_SECURITY_SELINUX_AVC_STATS=y +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set 
+CONFIG_AUDIT=y +CONFIG_AUDITSYSCALL=y + +# +# Cryptographic options +# +CONFIG_CRYPTO=y +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_MANAGER=m +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AES=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CBC=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CTR=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SALSA20_586=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_TEST=m +CONFIG_LIBCRC32C=m +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DEV_HIFN_795X=m +CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y +CONFIG_CRYPTO_PCRYPT=m + +# Random number generation + +# +# Library routines +# +CONFIG_CRC16=y +CONFIG_CRC32=m +CONFIG_CRC_CCITT=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC_T10DIF=m + +CONFIG_CRYPTO_ZLIB=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m + +CONFIG_INITRAMFS_SOURCE="" +CONFIG_KEYS=y +CONFIG_KEYS_DEBUG_PROC_KEYS=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set + +CONFIG_ATA_OVER_ETH=m +CONFIG_BACKLIGHT_LCD_SUPPORT=y 
+CONFIG_BACKLIGHT_CLASS_DEVICE=m +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_PROGEAR=m +CONFIG_FB_NVIDIA_BACKLIGHT=y +CONFIG_FB_RIVA_BACKLIGHT=y +CONFIG_FB_RADEON_BACKLIGHT=y +CONFIG_FB_ATY128_BACKLIGHT=y +CONFIG_FB_ATY_BACKLIGHT=y +# CONFIG_BACKLIGHT_SAHARA is not set +CONFIG_BACKLIGHT_WM831X=m + +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m + +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_DEBUG=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_MEM_RES_CTLR=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y + +CONFIG_CPUSETS=y +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_NS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_PROC_PID_CPUSET=y + +# CONFIG_SYSFS_DEPRECATED is not set +# CONFIG_SYSFS_DEPRECATED_V2 is not set + +CONFIG_RELAY=y +# CONFIG_PRINTK_TIME is not set + +CONFIG_ENABLE_MUST_CHECK=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set + +CONFIG_KEXEC=y + +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +CONFIG_IBMASR=m + +CONFIG_PM_DEBUG=y +CONFIG_PM_TRACE=y +# CONFIG_PM_VERBOSE is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_RUNTIME=y + +## BEGIN ISA Junk. 
+ +CONFIG_I82365=m +# CONFIG_TCIC is not set +# CONFIG_PCMCIA_PROBE is not set +# CONFIG_LTPC is not set +# CONFIG_COPS is not set + +CONFIG_SCSI_AHA152X=m +CONFIG_SCSI_AHA1542=m +# CONFIG_SCSI_IN2000 is not set +CONFIG_SCSI_ARCMSR=m +CONFIG_SCSI_ARCMSR_AER=y +# CONFIG_SCSI_DTC3280 is not set +# CONFIG_SCSI_GENERIC_NCR5380 is not set +# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set +# CONFIG_SCSI_NCR53C406A is not set +# CONFIG_SCSI_PAS16 is not set +# CONFIG_SCSI_QLOGIC_FAS is not set +# CONFIG_SCSI_SYM53C416 is not set +# CONFIG_SCSI_T128 is not set +# CONFIG_SCSI_U14_34F is not set +# CONFIG_SCSI_ULTRASTOR is not set + +# CONFIG_EL1 is not set +# CONFIG_EL2 is not set +# CONFIG_ELPLUS is not set +# CONFIG_EL16 is not set +CONFIG_EL3=m +# CONFIG_3C515 is not set +# CONFIG_LANCE is not set +CONFIG_NET_VENDOR_SMC=y +# CONFIG_WD80x3 is not set +CONFIG_ULTRA=m +# CONFIG_SMC9194 is not set +# CONFIG_NET_VENDOR_RACAL is not set +# CONFIG_NI52 is not set +# CONFIG_NI65 is not set +# CONFIG_AT1700 is not set +# CONFIG_DEPCA is not set +CONFIG_NET_ISA=y +CONFIG_NE2000=m +# CONFIG_E2100 is not set +CONFIG_EWRK3=m +# CONFIG_EEXPRESS is not set +# CONFIG_EEXPRESS_PRO is not set +# CONFIG_HPLAN_PLUS is not set +# CONFIG_HPLAN is not set +# CONFIG_LP486E is not set +# CONFIG_ETH16I is not set +# CONFIG_ZNET is not set +# CONFIG_SEEQ8005 is not set +# CONFIG_AC3200 is not set +# CONFIG_APRICOT is not set +# CONFIG_CS89x0 is not set +# CONFIG_IBMTR is not set +# CONFIG_SKISA is not set +# CONFIG_PROTEON is not set +# CONFIG_SMCTR is not set +# CONFIG_WAVELAN is not set +# CONFIG_HISAX_16_0 is not set +# CONFIG_HISAX_AVM_A1 is not set +# CONFIG_HISAX_IX1MICROR2 is not set +# CONFIG_HISAX_ASUSCOM is not set +# CONFIG_HISAX_TELEINT is not set +# CONFIG_HISAX_HFCS is not set +# CONFIG_HISAX_SPORTSTER is not set +# CONFIG_HISAX_MIC is not set +# CONFIG_HISAX_ISURF is not set +# CONFIG_HISAX_HSTSAPHIR is not set +# CONFIG_ISDN_DRV_ICN is not set +# CONFIG_ISDN_DRV_PCBIT is not set +# 
CONFIG_ISDN_DRV_SC is not set +# CONFIG_ISDN_DRV_ACT2000 is not set +# CONFIG_ISDN_DRV_AVMB1_B1ISA is not set +# CONFIG_ISDN_DRV_AVMB1_T1ISA is not set + +# CONFIG_MOUSE_INPORT is not set +# CONFIG_MOUSE_ATIXL is not set +# CONFIG_MOUSE_LOGIBM is not set +# CONFIG_MOUSE_PC110PAD is not set + +# CONFIG_SERIAL_8250_FOURPORT is not set +# CONFIG_SERIAL_8250_ACCENT is not set +# CONFIG_SERIAL_8250_BOCA is not set +# CONFIG_SERIAL_8250_HUB6 is not set +# CONFIG_SERIAL_8250_EXAR_ST16C554 is not set + +# CONFIG_PCWATCHDOG is not set +# CONFIG_WDT is not set + +# CONFIG_VIDEO_PMS is not set +CONFIG_RADIO_ADAPTERS=y +# CONFIG_RADIO_CADET is not set +# CONFIG_RADIO_RTRACK is not set +# CONFIG_RADIO_RTRACK2 is not set +# CONFIG_RADIO_AZTECH is not set +# CONFIG_RADIO_GEMTEK is not set +# CONFIG_RADIO_SF16FMI is not set +# CONFIG_RADIO_SF16FMR2 is not set +# CONFIG_RADIO_TERRATEC is not set +# CONFIG_RADIO_TRUST is not set +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_TYPHOON is not set +# CONFIG_RADIO_ZOLTRIX is not set +# CONFIG_RADIO_SAA7706H is not set + +# CONFIG_SND_OPL4_LIB is not set +# CONFIG_SND_AD1816A is not set +# CONFIG_SND_AD1848 is not set +# CONFIG_SND_CS4231 is not set +CONFIG_SND_CS4236=m +# CONFIG_SND_ES968 is not set +# CONFIG_SND_ES1688 is not set +# CONFIG_SND_ES18XX is not set +# CONFIG_SND_GUSCLASSIC is not set +# CONFIG_SND_GUSEXTREME is not set +# CONFIG_SND_GUSMAX is not set +# CONFIG_SND_INTERWAVE is not set +# CONFIG_SND_JAZZ16 is not set +# CONFIG_SND_INTERWAVE_STB is not set +# CONFIG_SND_OPTI92X_AD1848 is not set +# CONFIG_SND_OPTI92X_CS4231 is not set +# CONFIG_SND_OPTI93X is not set +# CONFIG_SND_SB8 is not set +CONFIG_SND_SB16=m +CONFIG_SND_SBAWE=m +# CONFIG_SND_SB16_CSP is not set +# CONFIG_SND_ALS100 is not set +# CONFIG_SND_AZT2320 is not set +# CONFIG_SND_CMI8330 is not set +# CONFIG_SND_DT019X is not set +CONFIG_SND_OPL3SA2=m +# CONFIG_SND_SGALAXY is not set +# CONFIG_SND_SSCAPE is not set +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m 
+CONFIG_SND_LAYLA20=m
+CONFIG_SND_DARLA24=m
+CONFIG_SND_GINA24=m
+CONFIG_SND_LAYLA24=m
+CONFIG_SND_MONA=m
+CONFIG_SND_MIA=m
+CONFIG_SND_ECHO3G=m
+CONFIG_SND_INDIGO=m
+CONFIG_SND_INDIGOIO=m
+CONFIG_SND_INDIGODJ=m
+CONFIG_SND_INDIGOIOX=m
+CONFIG_SND_INDIGODJX=m
+# CONFIG_SND_SOC is not set
+
+## END of ISA options.
+
+
+CONFIG_MIGRATION=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_AMS_DELTA is not set
+# CONFIG_LEDS_LOCOMO is not set
+# CONFIG_LEDS_NET48XX is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_S3C24XX is not set
+CONFIG_LEDS_DELL_NETBOOKS=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_LEDS_ALIX2=m
+CONFIG_LEDS_WM8350=m
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_WM831X_STATUS=m
+CONFIG_LEDS_REGULATOR=m
+CONFIG_LEDS_LT3593=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_INTEL_SS4200=m
+
+CONFIG_DMADEVICES=y
+CONFIG_DMA_ENGINE=y
+CONFIG_NET_DMA=y
+# CONFIG_DMATEST is not set
+CONFIG_ASYNC_TX_DMA=y
+
+CONFIG_UNUSED_SYMBOLS=y
+
+CONFIG_UTRACE=y
+
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+CONFIG_SCHED_TRACER=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_WORKQUEUE_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_KMEMTRACE=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_RING_BUFFER_BENCHMARK=m
+CONFIG_FUNCTION_TRACER=y
+CONFIG_STACK_TRACER=y
+CONFIG_DYNAMIC_FTRACE=y
+
+CONFIG_KPROBES=y
+CONFIG_OPTPROBES=y
+
+# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
+
+CONFIG_HZ_1000=y
+
+CONFIG_TIMER_STATS=y
+
+# Auxiliary displays
+CONFIG_KS0108=m
+CONFIG_KS0108_PORT=0x378
+CONFIG_KS0108_DELAY=2
+CONFIG_CFAG12864B=y
+CONFIG_CFAG12864B_RATE=20
+
+# CONFIG_PHANTOM is not set
+
+CONFIG_POWER_SUPPLY=m
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_APM_POWER=m +CONFIG_WM831X_POWER=m +# CONFIG_BATTERY_DS2760 is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_PMU=m +CONFIG_BATTERY_BQ27x00=m +CONFIG_BATTERY_MAX17040=m +# CONFIG_PDA_POWER is not set + +CONFIG_AUXDISPLAY=y + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_SMX=m +CONFIG_UIO_PDRV=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set + +# CONFIG_CRC7 is not set + + +# LIRC +CONFIG_INPUT_LIRC=m +CONFIG_LIRC_BT829=m +CONFIG_LIRC_ENE0100=m +CONFIG_LIRC_I2C=m +CONFIG_LIRC_IGORPLUGUSB=m +CONFIG_LIRC_IMON=m +CONFIG_LIRC_IT87=m +CONFIG_LIRC_ITE8709=m +CONFIG_LIRC_MCEUSB=m +CONFIG_LIRC_ZILOG=m +CONFIG_LIRC_PARALLEL=m +CONFIG_LIRC_SERIAL=m +CONFIG_LIRC_SERIAL_TRANSMITTER=y +CONFIG_LIRC_SASEM=m +CONFIG_LIRC_SIR=m +CONFIG_LIRC_STREAMZAP=m +CONFIG_LIRC_TTUSBIR=m + +# CONFIG_SAMPLES is not set + +# CONFIG_DEVKMEM is not set + +CONFIG_PM_TRACE_RTC=y +CONFIG_R6040=m + +CONFIG_BNX2X=m +CONFIG_NOZOMI=m +# CONFIG_TPS65010 is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +CONFIG_LATENCYTOP=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_COMPAT_BRK is not set + + +#FIXME: x86 generic? 
+CONFIG_LEDS_CLEVO_MAIL=m +CONFIG_I8K=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_X38=m +CONFIG_INPUT_APANEL=m + +# CONFIG_INTEL_MENLOW is not set +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_ISL29003=m +CONFIG_IPWIRELESS=m +CONFIG_RTC_DRV_DS1511=m + +# CONFIG_BLK_DEV_XIP is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m + +CONFIG_ACCESSIBILITY=y +CONFIG_A11Y_BRAILLE_CONSOLE=y + +# CONFIG_HTC_PASIC3 is not set + +# MT9V022_PCA9536_SWITCH is not set + +CONFIG_THERMAL_HWMON=y + +CONFIG_OPTIMIZE_INLINING=y + +# FIXME: This should be x86/ia64 only +# CONFIG_HP_ILO is not set + +# CONFIG_GPIOLIB is not set + + +CONFIG_NETFILTER_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +# CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=m + +# CONFIG_IP_VS_IPV6 is not set + +CONFIG_NET_DSA=y +CONFIG_NET_DSA_MV88E6060=y +CONFIG_NET_DSA_MV88E6131=y +CONFIG_NET_DSA_MV88E6123_61_65=y + +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_ACT_SKBEDIT=m + +CONFIG_PHONET=m + +CONFIG_ICS932S401=m +# CONFIG_C2PORT is not set +CONFIG_W1_SLAVE_BQ27000=m + + +CONFIG_IT87_WDT=m +CONFIG_W83697UG_WDT=m + +# CONFIG_REGULATOR is not set +# CONFIG_REGULATOR_DEBUG is not set + +CONFIG_WM8350_POWER=m + +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set + +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set +CONFIG_USB_WHCI_HCD=m +CONFIG_USB_HWA_HCD=m + +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_WLP=m +CONFIG_UWB_I1480U=m +CONFIG_UWB_I1480U_WLP=m + +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ET131X is not set +# CONFIG_SLICOSS is not set +# CONFIG_VIDEO_GO7007 is not set +# CONFIG_USB_IP_COMMON is not set +# CONFIG_DT3155 is not set +# CONFIG_W35UND is not set +# CONFIG_PRISM2_USB is not set +# CONFIG_ECHO is not set +CONFIG_USB_ATMEL=m +# 
CONFIG_POCH is not set +# CONFIG_OTUS is not set +# CONFIG_RT2860 is not set +# CONFIG_RT2870 is not set +# CONFIG_COMEDI is not set +# CONFIG_ASUS_OLED is not set +# CONFIG_PANEL is not set +# CONFIG_ALTERA_PCIE_CHDMA is not set +# CONFIG_INPUT_MIMIO is not set +# CONFIG_TRANZPORT is not set +# CONFIG_POHMELFS is not set +# CONFIG_B3DFG is not set +# CONFIG_IDE_PHISON is not set +# CONFIG_PLAN9AUTH is not set +# CONFIG_LINE6_USB is not set +# CONFIG_RTL8192SU is not set +# CONFIG_IIO is not set +# CONFIG_VME_BUS is not set +# CONFIG_RAR_REGISTER is not set +# CONFIG_VT6656 is not set +# CONFIG_USB_SERIAL_QUATECH_USB2 is not set +# CONFIG_RTL8192E is not set +# CONFIG_INPUT_GPIO is not set +# CONFIG_VIDEO_CX25821 is not set +# CONFIG_HYPERV is not set +# CONFIG_R8187SE is not set +# CONFIG_RTL8192U is not set +# CONFIG_RAMZSWAP is not set +# CONFIG_BATMAN_ADV is not set +# CONFIG_FB_SM7XX is not set + +# +# Android +# + +# CONFIG_DEBUG_VIRTUAL is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_FUNCTION_GRAPH_TRACER is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_EARLY_PRINTK_DBGP=y + +CONFIG_SECURITYFS=y + +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_FCOE_FNIC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set + +CONFIG_NOP_USB_XCEIV=m + +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_AUDIT=y +CONFIG_IMA_LSM_RULES=y + +CONFIG_LSM_MMAP_MIN_ADDR=65536 + +# CONFIG_PAGE_POISONING is not set + +CONFIG_SLOW_WORK=y +CONFIG_SLOW_WORK_DEBUG=y + +# CONFIG_CRASH_DUMP is not set +# CONFIG_CRASH is not set + +CONFIG_STRIP_ASM_SYMS=y + +# CONFIG_RCU_FANOUT_EXACT is not set +CONFIG_RCU_FAST_NO_HZ=y + +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 + +CONFIG_FSNOTIFY=y + +CONFIG_IEEE802154=m +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKEHARD=m + +# CONFIG_GCOV_KERNEL is not set + +### is this generally useful? 
+# CONFIG_PPS is not set
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_DEBUG is not set
+
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_FB_UDL is not set
+
+# DEBUG options that don't get enabled/disabled with 'make debug/release'
+#
+# Kmemleak still produces a lot of false positives.
+# CONFIG_DEBUG_KMEMLEAK is not set
+#
+# This generates a huge amount of dmesg spew
+# CONFIG_DEBUG_KOBJECT is not set
+#
+#
+# These debug options are deliberately left on (even in 'make release' kernels).
+# They aren't that much of a performance impact, and the value
+# from getting useful bug-reports makes it worth leaving them on.
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_HIGHMEM=y
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_DEBUG_RODATA_TEST=y
+CONFIG_DEBUG_NX_TEST=m
+CONFIG_DEBUG_BOOT_PARAMS=y
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+
+CONFIG_MEMORY_FAILURE=y
+CONFIG_HWPOISON_INJECT=m
+
+CONFIG_BLK_DEV_DRBD=m
+
+# CONFIG_MDIO_GPIO is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_W1_MASTER_GPIO is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_ADP5588 is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_RADIO_MIROPCM20 is not set
+# CONFIG_USB_GPIO_VBUS is not set
+
+
+CONFIG_KSYM_TRACER=y
+CONFIG_PROFILE_KSYM_TRACER=y
+CONFIG_KPROBE_EVENT=y
diff --git a/config-i686-PAE b/config-i686-PAE
new file mode 100644
index 000000000..1e58e65b2
--- /dev/null
+++ b/config-i686-PAE
@@ -0,0 +1,5 @@ +# CONFIG_HIGHMEM4G is not set +CONFIG_HIGHMEM64G=y + +CONFIG_XEN_DEV_EVTCHN=m +CONFIG_XEN_SYS_HYPERVISOR=y diff --git a/config-ia64-generic b/config-ia64-generic new file mode 100644 index 000000000..746fdf2e0 --- /dev/null +++ b/config-ia64-generic @@ -0,0 +1,205 @@ +# +# Automatically generated make config: don't edit +# + +# +# Processor type and features +# +CONFIG_IA64=y +CONFIG_64BIT=y +# CONFIG_XEN is not set +CONFIG_MMU=y +CONFIG_EFI=y +# CONFIG_ITANIUM is not set +CONFIG_MCKINLEY=y +CONFIG_IA64_GENERIC=y +# CONFIG_IA64_DIG is not set +# CONFIG_IA64_HP_ZX1 is not set +# CONFIG_IA64_SGI_SN2 is not set +CONFIG_IA64_ESI=y +CONFIG_IA64_HP_AML_NFW=y +CONFIG_MSPEC=y +# CONFIG_IA64_HP_SIM is not set +# CONFIG_IA64_PAGE_SIZE_4KB is not set +# CONFIG_IA64_PAGE_SIZE_8KB is not set +CONFIG_IA64_PAGE_SIZE_16KB=y +# CONFIG_IA64_PAGE_SIZE_64KB is not set +CONFIG_IA64_L1_CACHE_SHIFT=7 +CONFIG_NUMA=y +# CONFIG_VIRTUAL_MEM_MAP is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_IA64_MCA_RECOVERY=m +CONFIG_IA64_CYCLONE=y +CONFIG_MMTIMER=y +CONFIG_IOSAPIC=y +CONFIG_FORCE_MAX_ZONEORDER=18 +CONFIG_NR_CPUS=1024 +# CONFIG_IA32_SUPPORT is not set +# CONFIG_COMPAT is not set +CONFIG_PERFMON=y +CONFIG_IA64_PALINFO=y +CONFIG_EFI_VARS=y +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_EFI_PCDP=y +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set + +# +# IDE chipset support/bugfixes +# +CONFIG_BLK_DEV_SGIIOC4=y + +# +# Character devices +# +CONFIG_TCG_INFINEON=m + +# +# Watchdog Cards +# +# CONFIG_HW_RANDOM is not set +# CONFIG_GEN_RTC is not set +CONFIG_EFI_RTC=y +CONFIG_RTC_DRV_EFI=y + + +# +# AGP +# +CONFIG_AGP_I460=y +CONFIG_AGP_HP_ZX1=y +CONFIG_AGP_SGI_TIOCA=y + +# +# HP Simulator drivers +# +# CONFIG_HP_SIMETH is not set +# CONFIG_HP_SIMSERIAL is not set +# CONFIG_HP_SIMSCSI is not set + +# +# Kernel hacking +# +# CONFIG_IA64_PRINT_HAZARDS is not set +# CONFIG_DISABLE_VHPT is not set +# 
CONFIG_IA64_DEBUG_CMPXCHG is not set +# CONFIG_IA64_DEBUG_IRQ is not set + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# SGI +# +CONFIG_SGI_SNSC=y +CONFIG_SGI_TIOCX=y +CONFIG_SGI_MBCS=m +CONFIG_SGI_IOC3=m +CONFIG_SGI_IOC4=y +CONFIG_SGI_XP=m +CONFIG_SGI_GRU=m +# CONFIG_SGI_GRU_DEBUG is not set +CONFIG_SERIAL_SGI_L1_CONSOLE=y +CONFIG_SERIAL_SGI_IOC3=m +CONFIG_SERIAL_SGI_IOC4=m + + +# +# SCSI low-level drivers +# +# CONFIG_SCSI_BUSLOGIC is not set + +# +CONFIG_ACPI=y +CONFIG_ACPI_AC=y +# CONFIG_ACPI_ASUS is not set +CONFIG_ACPI_PROCFS_POWER=y +CONFIG_ACPI_SYSFS_POWER=y +# CONFIG_ACPI_BATTERY is not set +CONFIG_ACPI_BLACKLIST_YEAR=0 +CONFIG_ACPI_BUTTON=y +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_FAN=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_POWER=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_PROCFS=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_THERMAL=y +# CONFIG_ACPI_TOSHIBA is not set +CONFIG_ACPI_VIDEO=m +# CONFIG_ACPI_PROC_EVENT is not set + +CONFIG_PM=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_HPET is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=m +CONFIG_HOTPLUG_PCI_SGI=m +CONFIG_PNPACPI=y + +CONFIG_SCHED_SMT=y + +CONFIG_ARCH_DISCONTIGMEM_ENABLE=y + +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEBUG=y +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_STAT=m +CONFIG_CPU_FREQ_STAT_DETAILS=y + +CONFIG_IA64_ACPI_CPUFREQ=m + +# CONFIG_PERMIT_BSP_REMOVE is not set +# CONFIG_FORCE_CPEI_RETARGET is not set + +CONFIG_NODES_SHIFT=10 + + +CONFIG_HW_RANDOM_INTEL=m + +CONFIG_CRASH_DUMP=y +CONFIG_PROC_VMCORE=y + +# drivers/media/video/usbvision/usbvision-i2c.c:64:39: error: macro "outb" passed 4 arguments, but takes just 2 +# CONFIG_VIDEO_USBVISION is not set + +# CONFIG_IA64_MC_ERR_INJECT is not set + +CONFIG_DMIID=y + +CONFIG_SENSORS_I5K_AMB=m + 
+CONFIG_SPARSEMEM_VMEMMAP=y + +CONFIG_FRAME_WARN=2048 + +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m + +CONFIG_HP_ILO=m + +CONFIG_PARAVIRT_GUEST=y +CONFIG_PARAVIRT=y + +CONFIG_DMAR_DEFAULT_ON=y + +CONFIG_RCU_FANOUT=64 + +CONFIG_ACPI_POWER_METER=m +CONFIG_I2C_SCMI=m diff --git a/config-nodebug b/config-nodebug new file mode 100644 index 000000000..dcbb617ae --- /dev/null +++ b/config-nodebug @@ -0,0 +1,83 @@ +CONFIG_SND_VERBOSE_PRINTK=y +CONFIG_SND_DEBUG=y +CONFIG_SND_PCM_XRUN_DEBUG=y + +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_PROVE_RCU is not set + +# CONFIG_FAULT_INJECTION is not set +# CONFIG_FAILSLAB is not set +# CONFIG_FAIL_PAGE_ALLOC is not set +# CONFIG_FAIL_MAKE_REQUEST is not set +# CONFIG_FAULT_INJECTION_DEBUG_FS is not set +# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set +# CONFIG_FAIL_IO_TIMEOUT is not set + +# CONFIG_SLUB_DEBUG_ON is not set + +# CONFIG_LOCK_STAT is not set + +# CONFIG_DEBUG_STACK_USAGE is not set + +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_DEBUG_FUNC_TRACE is not set + +# CONFIG_DEBUG_SG is not set + +# CONFIG_DEBUG_PAGEALLOC is not set + +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +# CONFIG_DEBUG_OBJECTS_FREE is not set +# CONFIG_DEBUG_OBJECTS_TIMERS is not set +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 + +# CONFIG_X86_PTDUMP is not set + +# CONFIG_CAN_DEBUG_DEVICES is not set + +# CONFIG_MODULE_FORCE_UNLOAD is not set + +# CONFIG_SYSCTL_SYSCALL_CHECK is not set + +# CONFIG_DEBUG_NOTIFIERS is not set + +# CONFIG_DMA_API_DEBUG is not set + +# CONFIG_MMIOTRACE is not set + +# CONFIG_DEBUG_CREDENTIALS is not set + +# off in both production debug and nodebug builds, +# on in rawhide nodebug builds +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not 
set + +# CONFIG_EXT4_DEBUG is not set + +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set + +# CONFIG_JBD2_DEBUG is not set + +# CONFIG_DEBUG_CFQ_IOSCHED is not set + +# CONFIG_DRBD_FAULT_INJECTION is not set + +# CONFIG_ATH_DEBUG is not set +# CONFIG_IWLWIFI_DEVICE_TRACING is not set + +# CONFIG_DEBUG_OBJECTS_WORK is not set +# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set + +# CONFIG_DMADEVICES_DEBUG is not set +# CONFIG_DMADEVICES_VDEBUG is not set + +CONFIG_PM_ADVANCED_DEBUG=y + +# CONFIG_CEPH_FS_PRETTYDEBUG is not set +# CONFIG_QUOTA_DEBUG is not set diff --git a/config-powerpc-generic b/config-powerpc-generic new file mode 100644 index 000000000..ceace8285 --- /dev/null +++ b/config-powerpc-generic @@ -0,0 +1,331 @@ +# Most PowerPC kernels we build are SMP +CONFIG_SMP=y +CONFIG_IRQ_ALL_CPUS=y +CONFIG_PPC=y +CONFIG_WATCHDOG_RTAS=m +CONFIG_DEBUGGER=y +CONFIG_GENERIC_NVRAM=y +CONFIG_ALTIVEC=y + +CONFIG_TAU=y +# CONFIG_TAU_INT is not set +CONFIG_TAU_AVERAGE=y + +CONFIG_SECCOMP=y + +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEBUG=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=m +CONFIG_CPU_FREQ_STAT_DETAILS=y + +CONFIG_PM=y + +CONFIG_PM_STD_PARTITION="" + +CONFIG_SUSPEND=y +CONFIG_HIBERNATION=y +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_GEN_RTC_X is not set +CONFIG_RTC_DRV_GENERIC=y +CONFIG_PROC_DEVICETREE=y +# CONFIG_CMDLINE_BOOL is not set + +CONFIG_ADB=y +CONFIG_ADB_PMU=y +CONFIG_WINDFARM=y +CONFIG_WINDFARM_PM112=y +CONFIG_I2C_POWERMAC=y +CONFIG_APPLE_AIRPORT=m +CONFIG_SERIAL_PMACZILOG=m +# CONFIG_SERIAL_PMACZILOG_TTYS is not set +CONFIG_AGP_UNINORTH=y +CONFIG_FB_OF=y +# CONFIG_FB_CONTROL is not set +CONFIG_FB_IBM_GXT4500=y +CONFIG_FB_RADEON=y +CONFIG_FB_MATROX=y +CONFIG_FB_NVIDIA=m +# 
CONFIG_FB_VGA16 is not set +CONFIG_FB_ATY128_BACKLIGHT=y +CONFIG_FB_ATY_BACKLIGHT=y +CONFIG_FB_RADEON_BACKLIGHT=y +CONFIG_FB_RIVA_BACKLIGHT=y +CONFIG_FB_NVIDIA_BACKLIGHT=y + +CONFIG_SND_POWERMAC=m +CONFIG_SND_POWERMAC_AUTO_DRC=y +CONFIG_SND_AOA=m +CONFIG_SND_AOA_SOUNDBUS=m +CONFIG_SND_AOA_FABRIC_LAYOUT=m +CONFIG_SND_AOA_ONYX=m +CONFIG_SND_AOA_TAS=m +CONFIG_SND_AOA_TOONIE=m +CONFIG_SND_AOA_SOUNDBUS_I2S=m + +CONFIG_XMON=y +# CONFIG_XMON_DEFAULT is not set +CONFIG_XMON_DISASSEMBLY=y + +CONFIG_BOOTX_TEXT=y +CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_CAPI_EICON=y + +CONFIG_NVRAM=y + +# CONFIG_PCMCIA_M8XX is not set +# CONFIG_SCSI_AHA1542 is not set +# CONFIG_SCSI_IN2000 is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_NI52 is not set +# CONFIG_NI65 is not set +# CONFIG_LANCE is not set +# CONFIG_3C515 is not set +# CONFIG_ELPLUS is not set + +CONFIG_MEMORY_HOTPLUG=y + +# Stuff which wants bus_to_virt() or virt_to_bus() +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_VIDEO_STRADIS is not set +# CONFIG_VIDEO_ZORAN is not set +# CONFIG_ATM_HORIZON is not set +# CONFIG_ATM_FIRESTREAM is not set +# CONFIG_ATM_AMBASSADOR is not set +# CONFIG_SCSI_DC390T is not set +# CONFIG_SCSI_BUSLOGIC is not set + + +# CONFIG_PPC_EARLY_DEBUG is not set + +# CONFIG_PMAC_BACKLIGHT_LEGACY is not set +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_GPIO=m + +# FIXME: Should depend on IA64/x86 +# CONFIG_SGI_IOC4 is not set + +CONFIG_PPC_EFIKA=y +CONFIG_PPC_MEDIA5200=y + +# CONFIG_PPC_LITE5200 is not set +CONFIG_PPC_BESTCOMM=y +CONFIG_PMAC_RACKMETER=m +CONFIG_USB_OHCI_HCD_PPC_SOC=y +CONFIG_USB_OHCI_HCD_PCI=y +CONFIG_USB_OHCI_HCD_PPC_OF=y +CONFIG_USB_OHCI_HCD_PPC_OF_BE=y +CONFIG_USB_OHCI_HCD_PPC_OF_LE=y + +CONFIG_SERIAL_UARTLITE=m +CONFIG_SERIAL_UARTLITE_CONSOLE=y + +CONFIG_SENSORS_AMS=m +CONFIG_SENSORS_AMS_PMU=y +CONFIG_SENSORS_AMS_I2C=y + +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +# CONFIG_BLK_DEV_IDE_SATA is 
not set +# CONFIG_BLK_DEV_IDECS is not set +CONFIG_BLK_DEV_IDECD=m +# CONFIG_BLK_DEV_IDETAPE is not set +CONFIG_IDE_TASK_IOCTL=y +# +# IDE chipset support/bugfixes +# +# CONFIG_IDE_GENERIC is not set +# CONFIG_BLK_DEV_IDEPNP is not set +# CONFIG_BLK_DEV_IDEPCI is not set +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +# CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_CY82C693 is not set +# CONFIG_BLK_DEV_CS5520 is not set +# CONFIG_BLK_DEV_CS5530 is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_JMICRON is not set +# CONFIG_BLK_DEV_SC1200 is not set +# CONFIG_BLK_DEV_PIIX is not set +# CONFIG_BLK_DEV_IT821X is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SL82C105 is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +CONFIG_BLK_DEV_IDE_PMAC=y +CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y +CONFIG_BLK_DEV_IDEDMA=y +# CONFIG_BLK_DEV_HD is not set +# CONFIG_USB_STORAGE_ISD200 is not set +CONFIG_MTD_PHYSMAP_OF=m +CONFIG_IDE_PROC_FS=y +CONFIG_MACINTOSH_DRIVERS=y + +CONFIG_PPC_PASEMI_MDIO=m +CONFIG_SPU_FS_64K_LS=y +CONFIG_PPC_PASEMI_CPUFREQ=y +CONFIG_PMAC_APM_EMU=m +CONFIG_HW_RANDOM_PASEMI=m + +CONFIG_EDAC=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_MM_EDAC=m +CONFIG_EDAC_PASEMI=m +CONFIG_EDAC_AMD8131=m +CONFIG_EDAC_AMD8111=m + +CONFIG_AXON_RAM=m +CONFIG_OPROFILE_CELL=y + +CONFIG_SUSPEND_FREEZER=y +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_PATA_PLATFORM=m +CONFIG_PATA_OF_PLATFORM=m +CONFIG_USB_EHCI_HCD_PPC_OF=y + +# CONFIG_MPC5121_ADS is not set +# CONFIG_MPC5121_GENERIC is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_NAND_FSL_ELBC is not set +CONFIG_THERMAL=y + +# CONFIG_MEMORY_HOTREMOVE is not set + 
+CONFIG_DMADEVICES=y +# CONFIG_FSL_DMA is not set + +CONFIG_SND_PPC=y + +CONFIG_PPC_82xx=y +CONFIG_PPC_83xx=y +CONFIG_PPC_86xx=y +CONFIG_EXTRA_TARGETS="" +# CONFIG_CODE_PATCHING_SELFTEST is not set +# CONFIG_FTR_FIXUP_SELFTEST is not set + +# CONFIG_MATH_EMULATION is not set +# CONFIG_RAPIDIO is not set +# CONFIG_FS_ENET is not set +# CONFIG_UCC_GETH is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_SERIAL_CPM is not set +# CONFIG_SERIAL_QE is not set +# CONFIG_I2C_CPM is not set + + +CONFIG_SERIO_XILINX_XPS_PS2=m + +# CONFIG_PPC_SMLPAR is not set + +CONFIG_MGCOGE=y +CONFIG_GEF_SBC610=y +CONFIG_GEF_PPC9A=y +CONFIG_GEF_SBC310=y + +CONFIG_QUICC_ENGINE=y +CONFIG_QE_GPIO=y +CONFIG_MPC8xxx_GPIO=y + +CONFIG_IDE_GD=y +CONFIG_IDE_GD_ATA=y +CONFIG_IDE_GD_ATAPI=y + +CONFIG_MCU_MPC8349EMITX=m + +CONFIG_GPIO_XILINX=y + +CONFIG_PMIC_DA903X=y +CONFIG_BACKLIGHT_DA903X=m +CONFIG_LEDS_DA903X=m + +CONFIG_MSI_BITMAP_SELFTEST=y + +CONFIG_RELOCATABLE=y + +# CONFIG_HVC_UDBG is not set +CONFIG_PRINT_STACK_DEPTH=64 + +CONFIG_BATTERY_DA9030=m +# CONFIG_TWL4030_CORE is not set + +CONFIG_BLK_DEV_IT8172=m +CONFIG_TOUCHSCREEN_DA9034=m + +CONFIG_SIMPLE_GPIO=y + +CONFIG_FSL_PQ_MDIO=m + +CONFIG_PS3_VRAM=m +CONFIG_MDIO_GPIO=m +CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL=m +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_PCA953X=m +CONFIG_GPIO_PCF857X=m + +# CONFIG_USB_FHCI_HCD is not set +# CONFIG_FHCI_DEBUG is not set + +# CONFIG_DRM_RADEON_KMS is not set + +# CONFIG_AMIGAONE is not set + +CONFIG_PPC_OF_BOOT_TRAMPOLINE=y + +CONFIG_DTL=y + +CONFIG_MMC_SDHCI_OF=m + +# CONFIG_CONSISTENT_SIZE_BOOL is not set + +CONFIG_CAN_SJA1000_OF_PLATFORM=m + +CONFIG_PPC_EMULATED_STATS=y + +CONFIG_SWIOTLB=y + +# CONFIG_RDS is not set + +CONFIG_PPC_DISABLE_WERROR=y + +CONFIG_XILINX_EMACLITE=m + +CONFIG_GPIO_WM831X=m +# CONFIG_GPIO_LANGWELL is not set +# CONFIG_GPIO_UCB1400 is not set +CONFIG_EDAC_MPC85XX=m + +CONFIG_NR_IRQS=512 +CONFIG_SPARSE_IRQ=y + +CONFIG_PPC_MPC5200_LPBFIFO=m +CONFIG_CAN_MSCAN=m 
+CONFIG_CAN_MPC5XXX=m +CONFIG_PATA_MACIO=m +CONFIG_SERIAL_GRLIB_GAISLER_APBUART=m +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_88PM8607 is not set +# CONFIG_XPS_USB_HCD_XILINX is not set +# CONFIG_MMC_SDHCI_OF_ESDHC is not set +# CONFIG_MMC_SDHCI_OF_HLWD is not set + +# CONFIG_GPIO_SCH is not set diff --git a/config-powerpc32-generic b/config-powerpc32-generic new file mode 100644 index 000000000..a36ca81c6 --- /dev/null +++ b/config-powerpc32-generic @@ -0,0 +1,182 @@ +# CONFIG_SMP is not set +CONFIG_PPC32=y +# CONFIG_PPC64 is not set +# CONFIG_RTAS_PROC is not set +# CONFIG_PCMCIA_M8XX is not set +# CONFIG_HOTPLUG_PCI is not set +CONFIG_CPU_FREQ_PMAC=y +CONFIG_PPC_CHRP=y +CONFIG_PPC_PMAC=y +CONFIG_PPC_MPC52xx=y +CONFIG_PPC_PREP=y + +# CONFIG_PPC_MPC5200_SIMPLE is not set +CONFIG_SATA_FSL=m +# CONFIG_SATA_NV is not set + +# busted in .28git1 +# ERROR: "cacheable_memzero" [drivers/net/gianfar_driver.ko] undefined! +# CONFIG_GIANFAR is not set +CONFIG_USB_EHCI_FSL=y + +CONFIG_PMAC_APM_EMU=y +CONFIG_PMAC_BACKLIGHT=y + +CONFIG_HIGHMEM=y +# CONFIG_HIGHMEM_START_BOOL is not set +# CONFIG_LOWMEM_SIZE_BOOL is not set +# CONFIG_TASK_SIZE_BOOL is not set +# CONFIG_KERNEL_START_BOOL is not set +# CONFIG_PPC601_SYNC_FIX is not set +CONFIG_ADVANCED_OPTIONS=y +CONFIG_SCSI_MESH=m +CONFIG_SCSI_MESH_SYNC_RATE=5 +CONFIG_SCSI_MESH_RESET_DELAY_MS=4000 + +CONFIG_SCSI_MAC53C94=m +CONFIG_ADB_CUDA=y +CONFIG_ADB_MACIO=y +CONFIG_INPUT_ADBHID=y +CONFIG_ADB_PMU_LED=y +CONFIG_ADB_PMU_LED_IDE=y + +CONFIG_PMAC_MEDIABAY=y +CONFIG_BMAC=m +CONFIG_MACE=m +# CONFIG_MACE_AAUI_PORT is not set +CONFIG_MV643XX_ETH=m +CONFIG_I2C_HYDRA=m +CONFIG_I2C_MPC=m +CONFIG_THERM_WINDTUNNEL=m +CONFIG_THERM_ADT746X=m +# CONFIG_ANSLCD is not set + +CONFIG_FB_PLATINUM=y +CONFIG_FB_VALKYRIE=y +CONFIG_FB_CT65550=y +# CONFIG_BDI_SWITCH is not set + +CONFIG_MAC_FLOPPY=m +# CONFIG_BLK_DEV_FD is not set + +CONFIG_FB_ATY128=y +CONFIG_FB_ATY=y +CONFIG_FB_MATROX=y +# CONFIG_KEXEC is not set + +# CONFIG_HVC_RTAS is not set + 
+# CONFIG_UDBG_RTAS_CONSOLE is not set +CONFIG_BRIQ_PANEL=m + +# CONFIG_ATA_PIIX is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ATIIXP is not set +CONFIG_PATA_MPC52xx=m +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_SERVERWORKS is not set + +CONFIG_SERIAL_MPC52xx=y +CONFIG_SERIAL_MPC52xx_CONSOLE=y +CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 +# CONFIG_MPC5200_WDT is not set +CONFIG_8xxx_WDT=m +CONFIG_GEF_WDT=m + +CONFIG_PPC_MPC5200_BUGFIX=y +CONFIG_FEC_MPC52xx=m +#CHECK: This may later become a tristate. +CONFIG_FEC_MPC52xx_MDIO=y +CONFIG_PPC_MPC5200_GPIO=y +CONFIG_MDIO_GPIO=m + +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_DEBUG_STACKOVERFLOW=y + +# CONFIG_EMBEDDED6xx is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# CONFIG_BLK_DEV_PLATFORM is not set +# CONFIG_BLK_DEV_4DRIVES is not set +# CONFIG_BLK_DEV_ALI14XX is not set +# CONFIG_BLK_DEV_DTC2278 is not set +# CONFIG_BLK_DEV_HT6560B is not set +# CONFIG_BLK_DEV_QD65XX is not set +# CONFIG_BLK_DEV_UMC8672 is not set + +# CONFIG_VIRQ_DEBUG is not set + +CONFIG_PPC_BESTCOMM_ATA=m +CONFIG_PPC_BESTCOMM_FEC=m +CONFIG_PPC_BESTCOMM_GEN_BD=m + +CONFIG_FORCE_MAX_ZONEORDER=11 +# CONFIG_PAGE_OFFSET_BOOL is not set +# CONFIG_FB_FSL_DIU is not set +CONFIG_IRQSTACKS=y +CONFIG_VIRTUALIZATION=y + +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_HTC_EGPIO is not set + +# CONFIG_TIFM_CORE is not set + +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_CISS_SCSI_TAPE is not set + +# CONFIG_I2C_NFORCE2 is not set + +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set + +# CONFIG_MEMSTICK is not set + +# CONFIG_IPMI_HANDLER is not set +# CONFIG_TCG_TPM is not set + +# PPC gets sad with debug alloc (bz 448598) +# CONFIG_DEBUG_PAGEALLOC is not set + +CONFIG_SND_ISA=y +CONFIG_CRYPTO_DEV_TALITOS=m + +CONFIG_FSL_EMB_PERFMON=y +CONFIG_MPC8272_ADS=y +CONFIG_PQ2FADS=y +CONFIG_EP8248E=y 
+CONFIG_MPC831x_RDB=y +CONFIG_MPC832x_MDS=y +CONFIG_MPC832x_RDB=y +CONFIG_MPC834x_MDS=y +CONFIG_MPC834x_ITX=y +CONFIG_MPC836x_MDS=y +CONFIG_MPC836x_RDK=y +CONFIG_MPC837x_MDS=y +CONFIG_MPC837x_RDB=y +CONFIG_SBC834x=y +CONFIG_ASP834x=y +CONFIG_KMETER1=y +CONFIG_MPC8641_HPCN=y +CONFIG_SBC8641D=y +CONFIG_MPC8610_HPCD=y + +# CONFIG_USB_MUSB_HDRC is not set + +# busted in 2.6.27 +# drivers/mtd/maps/sbc8240.c: In function 'init_sbc8240_mtd': +# drivers/mtd/maps/sbc8240.c:172: warning: passing argument 1 of 'simple_map_init' from incompatible pointer type +# drivers/mtd/maps/sbc8240.c:177: error: 'struct mtd_info' has no member named 'module' + +CONFIG_MTD_NAND_FSL_UPM=m + +CONFIG_RCU_FANOUT=32 + +CONFIG_PERF_COUNTERS=y +CONFIG_PERF_EVENTS=y +CONFIG_EVENT_PROFILE=y + diff --git a/config-powerpc32-smp b/config-powerpc32-smp new file mode 100644 index 000000000..e60f59cdf --- /dev/null +++ b/config-powerpc32-smp @@ -0,0 +1,4 @@ +CONFIG_SMP=y +# CONFIG_HOTPLUG_CPU is not set +CONFIG_NR_CPUS=4 +# CONFIG_BATTERY_PMU is not set diff --git a/config-powerpc64 b/config-powerpc64 new file mode 100644 index 000000000..e2e8f99f0 --- /dev/null +++ b/config-powerpc64 @@ -0,0 +1,184 @@ +CONFIG_WINDFARM_PM81=y +CONFIG_WINDFARM_PM91=y +CONFIG_WINDFARM_PM121=y +CONFIG_PPC_PMAC64=y +CONFIG_PPC_MAPLE=y +CONFIG_PPC_CELL=y +CONFIG_PPC_IBM_CELL_BLADE=y +CONFIG_PPC_ISERIES=y +CONFIG_PPC_PSERIES=y +CONFIG_PPC_PMAC=y +CONFIG_PPC_PASEMI=y +# CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE is not set +CONFIG_PPC_PS3=y +CONFIG_PPC_CELLEB=y +CONFIG_PPC_CELL_QPACE=y +CONFIG_PS3_HTAB_SIZE=20 +# CONFIG_PS3_DYNAMIC_DMA is not set +CONFIG_PS3_ADVANCED=y +CONFIG_PS3_HTAB_SIZE=20 +# CONFIG_PS3_DYNAMIC_DMA is not set +CONFIG_PS3_VUART=y +CONFIG_PS3_PS3AV=y +CONFIG_PS3_STORAGE=m +CONFIG_PS3_DISK=m +CONFIG_PS3_ROM=m +CONFIG_PS3_FLASH=m +CONFIG_PS3_LPM=y +CONFIG_SND_PS3=m +CONFIG_SND_PS3_DEFAULT_START_DELAY=1000 +CONFIG_GELIC_NET=m +CONFIG_GELIC_WIRELESS=y +CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE=y +CONFIG_CBE_THERM=m 
+CONFIG_CBE_CPUFREQ=m +CONFIG_CBE_CPUFREQ_PMI=m +CONFIG_CBE_CPUFREQ_PMI_ENABLE=y +CONFIG_PMAC_RACKMETER=m +CONFIG_IBMEBUS=y +CONFIG_SPU_FS=m +CONFIG_RTAS_FLASH=y +CONFIG_PPC_SPLPAR=y +CONFIG_SCANLOG=y +CONFIG_LPARCFG=y +CONFIG_SERIAL_ICOM=m +CONFIG_HVCS=m +CONFIG_HVC_CONSOLE=y +CONFIG_HOTPLUG_PCI=y +CONFIG_THERM_PM72=y +CONFIG_IBMVETH=m +CONFIG_SCSI_IBMVSCSI=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=m +CONFIG_HOTPLUG_PCI_RPA=m +CONFIG_HOTPLUG_PCI_RPA_DLPAR=y +CONFIG_ADB_PMU_LED=y +CONFIG_ADB_PMU_LED_IDE=y +CONFIG_PMAC_SMU=y +CONFIG_CPU_FREQ_PMAC64=y +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +CONFIG_SPIDER_NET=m +CONFIG_HVC_RTAS=y +CONFIG_HVC_ISERIES=y +CONFIG_CBE_RAS=y + +# iSeries device drivers +# +CONFIG_ISERIES_VETH=m +CONFIG_VIODASD=m +CONFIG_VIOCD=m +CONFIG_VIOTAPE=m + +CONFIG_PASEMI_MAC=m +CONFIG_SERIAL_OF_PLATFORM=m + +CONFIG_PPC_PASEMI_IOMMU=y +CONFIG_SERIAL_TXX9=y +CONFIG_SERIAL_TXX9_NR_UARTS=6 +CONFIG_SERIAL_TXX9_CONSOLE=y + +CONFIG_HVC_BEAT=y + +CONFIG_FB_PS3=y +CONFIG_FB_PS3_DEFAULT_SIZE_M=18 + +CONFIG_PPC_PMI=m +CONFIG_PS3_SYS_MANAGER=y +# CONFIG_BLK_DEV_CELLEB is not set + +CONFIG_PATA_SCC=m + +CONFIG_APM_EMULATION=m + +CONFIG_PPC64=y +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_NR_CPUS=128 +# CONFIG_FB_PLATINUM is not set +# CONFIG_FB_VALKYRIE is not set +# CONFIG_FB_CT65550 is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set + +# CONFIG_POWER4_ONLY is not set + +CONFIG_RTAS_PROC=y +CONFIG_IOMMU_VMERGE=y +CONFIG_NUMA=y +# CONFIG_PPC_64K_PAGES is not set +CONFIG_SCHED_SMT=y + +# CONFIG_MV643XX_ETH is not set +CONFIG_IRQSTACKS=y +CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_INPUT_PCSPKR is not set + +CONFIG_EHEA=m +CONFIG_INFINIBAND_EHCA=m + +CONFIG_HCALL_STATS=y + +CONFIG_XMON_DISASSEMBLY=y + +CONFIG_SCSI_IBMVSCSIS=m + +CONFIG_SECCOMP=y + +CONFIG_TUNE_CELL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_BLK_DEV_PLATFORM is not set +CONFIG_IBM_NEW_EMAC=m 
+CONFIG_IBM_NEW_EMAC_RXB=128 +CONFIG_IBM_NEW_EMAC_TXB=64 +CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32 +CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256 +CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0 +# CONFIG_IBM_NEW_EMAC_DEBUG is not set + +# CONFIG_VIRQ_DEBUG is not set +CONFIG_ELECTRA_CF=m + +CONFIG_MTD_NAND_PASEMI=m +CONFIG_EDAC_CELL=m +CONFIG_EDAC_CPC925=m +CONFIG_FRAME_WARN=2048 + +CONFIG_PHYP_DUMP=y +CONFIG_FORCE_MAX_ZONEORDER=13 +CONFIG_VIRTUALIZATION=y + +CONFIG_VSX=y + +CONFIG_SCSI_IBMVFC=m +# CONFIG_SCSI_IBMVFC_TRACE is not set +CONFIG_IBM_BSR=m + +CONFIG_SERIO_XILINX_XPS_PS2=m + +CONFIG_PPC_IBM_CELL_RESETBUTTON=y +CONFIG_PPC_IBM_CELL_POWERBUTTON=m +CONFIG_CBE_CPUFREQ_SPU_GOVERNOR=m + +CONFIG_RTC_DRV_PS3=y + +CONFIG_CRASH_DUMP=y +CONFIG_RELOCATABLE=y + +CONFIG_RCU_FANOUT=64 + +CONFIG_PERF_COUNTERS=y +CONFIG_PERF_EVENTS=y +CONFIG_EVENT_PROFILE=y + +CONFIG_KVM_BOOK3S_64=m +# CONFIG_KVM_EXIT_TIMING is not set + +#-- bz#607175 +#-- active memory sharing +CONFIG_PPC_SMLPAR=y +CONFIG_CMM=y +#-- DLPAR memory remove +# CONFIG_SPARSEMEM_VMEMMAP is not set diff --git a/config-rhel-generic b/config-rhel-generic new file mode 100644 index 000000000..09dbf8121 --- /dev/null +++ b/config-rhel-generic @@ -0,0 +1,205 @@ +# CONFIG_ISA is not set +# CONFIG_ISAPNP is not set +# CONFIG_I2C_PCA_ISA is not set + +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_AHA1542 is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_IN2000 is not set +# CONFIG_SCSI_QLOGIC_FAS is not set +# CONFIG_SCSI_DC390T is not set + +# CONFIG_ATALK is not set +# CONFIG_DEV_APPLETALK is not set +# CONFIG_LTPC is not set +# CONFIG_COPS is not set +# CONFIG_IPX is not set +# CONFIG_IPDDP is not set +# CONFIG_DECNET is not set +# CONFIG_PLIP is not set + +# CONFIG_PCMCIA_AHA152X is not set +# CONFIG_PCMCIA_NINJA_SCSI is not set +# CONFIG_PCMCIA_QLOGIC is not set +# CONFIG_PCMCIA_SYM53C500 is not set + +# CONFIG_EL2 is not set +# CONFIG_ELPLUS is 
not set +# CONFIG_WD80x3 is not set +# CONFIG_I82092 is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_SUNDANCE is not set +# CONFIG_ULTRA is not set +# CONFIG_SKFP is not set +# CONFIG_DE600 is not set +# CONFIG_DE620 is not set +# CONFIG_CS89x0 is not set +# CONFIG_AC3200 is not set +# CONFIG_NI52 is not set +# CONFIG_NI65 is not set +# CONFIG_LANCE is not set +# CONFIG_EL16 is not set +# CONFIG_EL3 is not set +# CONFIG_3C515 is not set +# CONFIG_HAMACHI is not set +# CONFIG_HP100 is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_SB1000 is not set +# CONFIG_DEPCA is not set +# CONFIG_ATP is not set + +# CONFIG_TR is not set + +# CONFIG_GAMEPORT is not set + +# CONFIG_SND_AD1816A is not set +# CONFIG_SND_AD1848 is not set +# CONFIG_SND_CS4231 is not set +# CONFIG_SND_CS4236 is not set +# CONFIG_SND_ES968 is not set +# CONFIG_SND_ES1688 is not set +# CONFIG_SND_ES18XX is not set +# CONFIG_SND_GUSCLASSIC is not set +# CONFIG_SND_GUSEXTREME is not set +# CONFIG_SND_GUSMAX is not set +# CONFIG_SND_INTERWAVE is not set +# CONFIG_SND_INTERWAVE_STB is not set +# CONFIG_SND_OPTI92X_AD1848 is not set +# CONFIG_SND_OPTI92X_CS4231 is not set +# CONFIG_SND_OPTI93X is not set +# CONFIG_SND_MIRO is not set +# CONFIG_SND_SB8 is not set +# CONFIG_SND_SB16 is not set +# CONFIG_SND_SBAWE is not set +# CONFIG_SND_SB16_CSP is not set +# CONFIG_SND_WAVEFRONT is not set +# CONFIG_SND_ALS100 is not set +# CONFIG_SND_AZT2320 is not set +# CONFIG_SND_CMI8330 is not set +# CONFIG_SND_DT019X is not set +# CONFIG_SND_OPL3SA2 is not set +# CONFIG_SND_SGALAXY is not set +# CONFIG_SND_SSCAPE is not set + +# CONFIG_WAN_ROUTER is not set + +# CONFIG_BINFMT_AOUT is not set + +# CONFIG_DRM_TDFX is not set +# CONFIG_DRM_SIS is not set + +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_DAC960 is not set + +# CONFIG_PARIDE is not set + +# CONFIG_I2O is not set + +# CONFIG_MWAVE is not set + +# CONFIG_ROCKETPORT is not set +# CONFIG_R3964 is not set + +# 
CONFIG_JOYSTICK_ANALOG is not set +# CONFIG_JOYSTICK_A3D is not set +# CONFIG_JOYSTICK_ADI is not set +# CONFIG_JOYSTICK_COBRA is not set +# CONFIG_JOYSTICK_GF2K is not set +# CONFIG_JOYSTICK_GRIP is not set +# CONFIG_JOYSTICK_GRIP_MP is not set +# CONFIG_JOYSTICK_GUILLEMOT is not set +# CONFIG_JOYSTICK_INTERACT is not set +# CONFIG_JOYSTICK_SIDEWINDER is not set +# CONFIG_JOYSTICK_TMDC is not set +# CONFIG_JOYSTICK_IFORCE is not set +# CONFIG_JOYSTICK_WARRIOR is not set +# CONFIG_JOYSTICK_MAGELLAN is not set +# CONFIG_JOYSTICK_SPACEORB is not set +# CONFIG_JOYSTICK_SPACEBALL is not set +# CONFIG_JOYSTICK_STINGER is not set +# CONFIG_JOYSTICK_DB9 is not set +# CONFIG_JOYSTICK_GAMECON is not set +# CONFIG_JOYSTICK_TURBOGRAFX is not set + +# CONFIG_RADIO_CADET is not set +# CONFIG_RADIO_RTRACK is not set +# CONFIG_RADIO_RTRACK2 is not set +# CONFIG_RADIO_AZTECH is not set +# CONFIG_RADIO_GEMTEK is not set +# CONFIG_RADIO_GEMTEK_PCI is not set +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_MAESTRO is not set +# CONFIG_RADIO_MIROPCM20 is not set +# CONFIG_RADIO_SF16FMI is not set +# CONFIG_RADIO_SF16FMR2 is not set +# CONFIG_RADIO_TERRATEC is not set +# CONFIG_RADIO_TRUST is not set +# CONFIG_RADIO_TYPHOON is not set +# CONFIG_RADIO_ZOLTRIX is not set + + +# CONFIG_VIDEO_PMS is not set +# CONFIG_VIDEO_BWQCAM is not set +# CONFIG_VIDEO_CQCAM is not set +# CONFIG_VIDEO_W9966 is not set +# CONFIG_VIDEO_CPIA is not set +# CONFIG_VIDEO_CPIA_PP is not set +# CONFIG_VIDEO_CPIA_USB is not set +# CONFIG_VIDEO_SAA5249 is not set +# CONFIG_VIDEO_STRADIS is not set +# CONFIG_VIDEO_ZORAN is not set +# CONFIG_VIDEO_ZORAN_BUZ is not set +# CONFIG_VIDEO_ZORAN_DC10 is not set +# CONFIG_VIDEO_ZORAN_DC30 is not set +# CONFIG_VIDEO_ZORAN_LML33 is not set +# CONFIG_VIDEO_ZORAN_LML33R10 is not set +# CONFIG_VIDEO_MEYE is not set +# CONFIG_VIDEO_SAA7134 is not set +# CONFIG_VIDEO_MXB is not set +# CONFIG_VIDEO_HEXIUM_ORION is not set +# CONFIG_VIDEO_HEXIUM_GEMINI is not set +# 
CONFIG_VIDEO_CX88 is not set +# CONFIG_VIDEO_SAA5246A is not set + +# CONFIG_INFTL is not set +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PCI is not set + +# CONFIG_FB_MATROX is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_HGA_ACCEL is not set +# CONFIG_FB_3DFX_ACCEL is not set + +# CONFIG_JFS_FS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_9P_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set + diff --git a/config-s390x b/config-s390x new file mode 100644 index 000000000..331c30c1b --- /dev/null +++ b/config-s390x @@ -0,0 +1,227 @@ +CONFIG_64BIT=y +# CONFIG_MARCH_G5 is not set +# CONFIG_MARCH_Z900 is not set +CONFIG_MARCH_Z9_109=y +# CONFIG_MARCH_Z990 is not set + +CONFIG_NR_CPUS=64 +CONFIG_COMPAT=y + +# See bug 496596 +CONFIG_HZ_100=y +# See bug 496605 +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +CONFIG_MMU=y + +CONFIG_LOG_BUF_SHIFT=16 +CONFIG_NO_IDLE_HZ=y + +CONFIG_SMP=y + +# +# I/O subsystem configuration +# +CONFIG_QDIO=m + +# +# Misc +# +CONFIG_IPL=y +# CONFIG_IPL_TAPE is not set +CONFIG_IPL_VM=y +# CONFIG_PROCESS_DEBUG is not set +CONFIG_PFAULT=y +CONFIG_SHARED_KERNEL=y +CONFIG_CMM=m +CONFIG_CMM_PROC=y +# CONFIG_NETIUCV is not set +CONFIG_SMSGIUCV=m + +# +# SCSI low-level drivers +# +CONFIG_ZFCP=m 
+CONFIG_ZFCPDUMP=y +CONFIG_CCW=y + +# +# S/390 block device drivers +# +CONFIG_DCSSBLK=m +CONFIG_BLK_DEV_XPRAM=m +CONFIG_DASD=m +CONFIG_DASD_PROFILE=y +CONFIG_DASD_ECKD=m +CONFIG_DASD_FBA=m +CONFIG_DASD_DIAG=m +CONFIG_DASD_EER=y + +# +# S/390 character device drivers +# +CONFIG_TN3270=y +CONFIG_TN3270_CONSOLE=y +CONFIG_TN3215=y +CONFIG_TN3215_CONSOLE=y +CONFIG_CCW_CONSOLE=y +CONFIG_SCLP_TTY=y +CONFIG_SCLP_CONSOLE=y +CONFIG_SCLP_VT220_TTY=y +CONFIG_SCLP_VT220_CONSOLE=y +CONFIG_SCLP_CPI=m +CONFIG_SCLP_ASYNC=m +CONFIG_S390_TAPE=m +CONFIG_S390_TAPE_3590=m + +CONFIG_APPLDATA_BASE=y +CONFIG_APPLDATA_MEM=m +CONFIG_APPLDATA_OS=m +CONFIG_APPLDATA_NET_SUM=m +CONFIG_TN3270_TTY=y +CONFIG_TN3270_FS=m + + +# +# S/390 tape interface support +# +CONFIG_S390_TAPE_BLOCK=y + +# +# S/390 tape hardware support +# +CONFIG_S390_TAPE_34XX=m + +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Token Ring devices +# +CONFIG_TR=y +CONFIG_NETCONSOLE=m + +# +# Wan interfaces +# +# CONFIG_WAN is not set + +# +# S/390 network device drivers +# +CONFIG_LCS=m +CONFIG_CTC=m +CONFIG_IUCV=m +CONFIG_QETH=m +CONFIG_QETH_IPV6=y +CONFIG_CCWGROUP=m + +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_MAC80211 is not set +# CONFIG_B44 is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_OSF_PARTITION is not set +CONFIG_IBM_PARTITION=y +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_SGI_PARTITION is not set +# CONFIG_SUN_PARTITION is not set + + +# +# S390 crypto hw +# +CONFIG_CRYPTO_SHA1_S390=m +CONFIG_CRYPTO_SHA256_S390=m +CONFIG_CRYPTO_DES_S390=m +CONFIG_CRYPTO_AES_S390=m + +# +# Kernel hacking +# + +# +# S390 specific stack options; needs gcc 3.5 so off for now +# +CONFIG_PACK_STACK=y +CONFIG_CHECK_STACK=y +# CONFIG_WARN_STACK is not set +# CONFIG_SMALL_STACK is not set + +CONFIG_ZVM_WATCHDOG=m +CONFIG_VMLOGRDR=m +CONFIG_MONREADER=m + +CONFIG_VIRT_CPU_ACCOUNTING=y + +# CONFIG_CLAW is not set + 
+CONFIG_VMCP=m + +# CONFIG_ATMEL is not set + +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MII is not set + + +CONFIG_STACK_GUARD=256 +CONFIG_CMM_IUCV=y + +# CONFIG_DETECT_SOFTLOCKUP is not set + +CONFIG_S390_HYPFS_FS=y + +CONFIG_MONWRITER=m +CONFIG_ZCRYPT=m +CONFIG_ZCRYPT_MONOLITHIC=y + +CONFIG_S390_EXEC_PROTECT=y +CONFIG_AFIUCV=m +CONFIG_S390_PRNG=m + +CONFIG_S390_VMUR=m + +# CONFIG_THERMAL is not set + +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CTCM=m +CONFIG_QETH_L2=m +CONFIG_QETH_L3=m +CONFIG_CRYPTO_SHA512_S390=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_S390_GUEST=y + + +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_CHSC_SCH=m + +# drivers/isdn/hardware/mISDN/hfcmulti.c:5255:2: error: #error "not running on big endian machines now" +# CONFIG_MISDN_HFCMULTI is not set + +CONFIG_HVC_IUCV=y + +CONFIG_RCU_FANOUT=64 + +CONFIG_SECCOMP=y + +CONFIG_PM=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="/dev/jokes" + +CONFIG_PERF_COUNTERS=y +CONFIG_PERF_EVENTS=y +CONFIG_EVENT_PROFILE=y + +CONFIG_SMSGIUCV_EVENT=m + +# CONFIG_PREEMPT_TRACER is not set diff --git a/config-sparc64-generic b/config-sparc64-generic new file mode 100644 index 000000000..7d9bef158 --- /dev/null +++ b/config-sparc64-generic @@ -0,0 +1,201 @@ +CONFIG_SMP=y +CONFIG_SPARC=y +CONFIG_SPARC64=y +CONFIG_SECCOMP=y +CONFIG_HZ_100=y +# CONFIG_HZ_1000 is not set +CONFIG_HZ=100 + +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=m +CONFIG_CPU_FREQ_DEBUG=y +# CONFIG_CPU_FREQ_STAT is not set +# CONFIG_CPU_FREQ_STAT_DETAILS is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_US3_FREQ=m +CONFIG_US2E_FREQ=m + +CONFIG_SUN_LDOMS=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_64BIT=y +# 
CONFIG_BBC_I2C is not set +CONFIG_HUGETLB_PAGE_SIZE_4MB=y +# CONFIG_HUGETLB_PAGE_SIZE_512K is not set +# CONFIG_HUGETLB_PAGE_SIZE_64K is not set +CONFIG_NR_CPUS=256 +CONFIG_US3_FREQ=m +CONFIG_US2E_FREQ=m +CONFIG_SUN_OPENPROMFS=m +CONFIG_COMPAT=y +CONFIG_UID16=y +CONFIG_BINFMT_ELF32=y +CONFIG_ENVCTRL=m +CONFIG_DISPLAY7SEG=m +CONFIG_WATCHDOG_CP1XXX=m +CONFIG_WATCHDOG_RIO=m +# CONFIG_CMDLINE_BOOL is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_PARPORT is not set +# CONFIG_BLK_DEV_FD is not set +# CONFIG_LIRC_PARALLEL is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_SIMTEC is not set +CONFIG_I2C_ALI1535=m +# CONFIG_VGASTATE is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BW2 is not set +CONFIG_FB_CG3=y +CONFIG_FB_CG6=y +# CONFIG_FB_RIVA is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +CONFIG_FB_ATY=y +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_TRIDENT is not set +CONFIG_FB_SBUS=y +CONFIG_FB_FFB=y +# CONFIG_FB_TCX is not set +# CONFIG_FB_CG14 is not set +CONFIG_FB_PM2=y +CONFIG_FB_P9100=y +# CONFIG_FB_LEO is not set +CONFIG_FB_XVR500=y +CONFIG_FB_XVR2500=y +# CONFIG_VGASTATE is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_KYRO is not set +# CONFIG_AGP is not set +# CONFIG_DRM_NOUVEAU is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not set +# CONFIG_FONT_8x16 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_10x18 is not set +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +CONFIG_FONT_SUN8x16=y +CONFIG_FONT_SUN12x22=y +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_SERIAL_8250 is not set +CONFIG_SERIAL_SUNZILOG=y 
+CONFIG_SERIAL_SUNZILOG_CONSOLE=y +CONFIG_SERIAL_SUNSU=y +CONFIG_SERIAL_SUNSU_CONSOLE=y +CONFIG_SERIAL_SUNSAB=y +CONFIG_SERIAL_SUNSAB_CONSOLE=y +CONFIG_SERIAL_SUNHV=y +CONFIG_SUN_OPENPROMIO=y +CONFIG_OBP_FLASH=m +# CONFIG_SERIO_SERPORT is not set +CONFIG_BLK_DEV_FD=y +CONFIG_SUNVDC=m +CONFIG_SUNVNET=m +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_EATA is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +CONFIG_SCSI_QLOGICPTI=m +CONFIG_SCSI_SUNESP=m +CONFIG_SUNLANCE=m +CONFIG_SUNBMAC=m +CONFIG_SUNQE=m +# CONFIG_DM9102 is not set +# CONFIG_HAMACHI is not set +# CONFIG_R8169 is not set +CONFIG_ATM_FORE200E_USE_TASKLET=y +CONFIG_ATM_FORE200E_DEBUG=0 +CONFIG_ATM_FORE200E_TX_RETRY=16 +# CONFIG_DRM_TDFX is not set +CONFIG_KEYBOARD_ATKBD=y +CONFIG_KEYBOARD_SUNKBD=y +# CONFIG_INPUT_PCSPKR is not set +CONFIG_INPUT_SPARCSPKR=m +# CONFIG_SOUND_PRIME is not set +# CONFIG_SND_SUN_AMD7930 is not set +CONFIG_SND_SUN_CS4231=m +# CONFIG_SND_SUN_DBRI is not set +CONFIG_PARPORT_SUNBPP=m +CONFIG_LOGO_SUN_CLUT224=y +CONFIG_MTD_SUN_UFLASH=m +CONFIG_MYRI_SBUS=m +# CONFIG_SGI_IOC4 is not set +# CONFIG_VIDEO_ZORAN is not set +# CONFIG_VIDEO_STRADIS is not set +# CONFIG_IEEE1394_SBP2 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_DCFLUSH is not set +# CONFIG_DEBUG_BOOTMEM is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_LOCKDEP is not set +# CONFIG_STACK_DEBUG is not set + +CONFIG_SPARSEMEM_VMEMMAP=y + +# CONFIG_THERMAL is not set + +CONFIG_FRAME_WARN=2048 + 
+CONFIG_NUMA=y + +CONFIG_SND_SPARC=y + +CONFIG_HW_RANDOM_N2RNG=m + +# drivers/isdn/hardware/mISDN/hfcmulti.c:5255:2: error: #error "not running on big endian machines now" +# CONFIG_MISDN_HFCMULTI is not set + +CONFIG_US3_MC=y +CONFIG_SENSORS_ULTRA45=m +CONFIG_LEDS_SUNFIRE=m +CONFIG_TADPOLE_TS102_UCTRL=m + +CONFIG_RCU_FANOUT=64 + +CONFIG_LIRC_ENE0100=m +# CONFIG_BATTERY_DS2782 is not set +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SN9C20X_EVDEV=y +CONFIG_LSM_MMAP_MIN_ADDR=65536 + +CONFIG_PERF_COUNTERS=y +CONFIG_PERF_EVENTS=y +CONFIG_EVENT_PROFILE=y + +CONFIG_EARLYFB=y +CONFIG_SERIAL_GRLIB_GAISLER_APBUART=m + +CONFIG_GRETH=m +CONFIG_FB_XVR1000=y diff --git a/config-x86-generic b/config-x86-generic new file mode 100644 index 000000000..9179350a4 --- /dev/null +++ b/config-x86-generic @@ -0,0 +1,473 @@ +CONFIG_UID16=y +# CONFIG_64BIT is not set +# CONFIG_KERNEL_LZMA is not set + +# +# Processor type and features +# +# +# Enable summit and co via the generic arch +# +CONFIG_X86_EXTENDED_PLATFORM=y +CONFIG_X86_32_NON_STANDARD=y + +# CONFIG_X86_ELAN is not set +# CONFIG_X86_NUMAQ is not set +# CONFIG_X86_SUMMIT is not set +CONFIG_X86_BIGSMP=y +# CONFIG_X86_VISWS is not set +# CONFIG_X86_RDC321X is not set +# CONFIG_X86_ES7000 is not set +# CONFIG_M386 is not set +# CONFIG_M486 is not set +# CONFIG_M586 is not set +# CONFIG_M586TSC is not set +# CONFIG_M586MMX is not set +CONFIG_M686=y +# CONFIG_MPENTIUMII is not set +# CONFIG_MPENTIUMIII is not set +# CONFIG_MPENTIUMM is not set +# CONFIG_MPENTIUM4 is not set +# CONFIG_MK6 is not set +# CONFIG_MK7 is not set +# CONFIG_MK8 is not set +# CONFIG_MCRUSOE is not set +# CONFIG_MWINCHIPC6 is not set +# CONFIG_MWINCHIP3D is not set +# CONFIG_MCYRIXIII is not set +# CONFIG_MVIAC3_2 is not set +CONFIG_SMP=y +CONFIG_NR_CPUS=32 +CONFIG_X86_GENERIC=y +# CONFIG_X86_PPRO_FENCE is not set +CONFIG_HPET=y +CONFIG_HPET_TIMER=y +# CONFIG_HPET_MMAP is not set +CONFIG_X86_MCE=y +CONFIG_TOSHIBA=m +CONFIG_I8K=m +CONFIG_SONYPI=m 
+CONFIG_SONYPI_COMPAT=y +CONFIG_MICROCODE=m +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +# CONFIG_NUMA is not set + +# CONFIG_NOHIGHMEM is not set +CONFIG_HIGHMEM4G=y +# CONFIG_HIGHMEM64G is not set +CONFIG_HIGHMEM=y +CONFIG_HIGHPTE=y + +# CONFIG_MATH_EMULATION is not set +CONFIG_MTRR=y +CONFIG_X86_PAT=y +CONFIG_X86_PM_TIMER=y + +CONFIG_EFI=y +CONFIG_EFI_VARS=y +CONFIG_EFI_PCDP=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set + +CONFIG_DMAR=y +CONFIG_DMAR_BROKEN_GFX_WA=y +CONFIG_DMAR_FLOPPY_WA=y +CONFIG_DMAR_DEFAULT_ON=y + +CONFIG_FB_GEODE=y +CONFIG_FB_GEODE_LX=y +CONFIG_FB_GEODE_GX=y +# CONFIG_FB_GEODE_GX1 is not set + +# CONFIG_PCI_GOBIOS is not set +# CONFIG_PCI_GODIRECT is not set +# CONFIG_PCI_GOMMCONFIG is not set +CONFIG_PCI_GOANY=y + +# +# x86 specific drivers +# +CONFIG_PCMCIA_FDOMAIN=m +CONFIG_SCSI_FUTURE_DOMAIN=m +CONFIG_SCSI_ADVANSYS=m + +CONFIG_CC_STACKPROTECTOR=y + +CONFIG_SECCOMP=y + +CONFIG_CAPI_EICON=y + +# +# APM (Advanced Power Management) BIOS Support +# +CONFIG_APM=y +# CONFIG_APM_IGNORE_USER_SUSPEND is not set +# CONFIG_APM_DO_ENABLE is not set +CONFIG_APM_CPU_IDLE=y +# CONFIG_APM_DISPLAY_BLANK is not set +# CONFIG_APM_ALLOW_INTS is not set + +# +# Kernel debugging +# +CONFIG_X86_MPPARSE=y + +CONFIG_ACPI=y +CONFIG_ACPI_AC=y +# CONFIG_ACPI_ASUS is not set +CONFIG_ACPI_PROCFS_POWER=y +CONFIG_ACPI_SYSFS_POWER=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BLACKLIST_YEAR=1999 +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_CONTAINER=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_FAN=y +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_POWER=y +CONFIG_ACPI_PROCFS=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_THERMAL=y +CONFIG_TOPSTAR_LAPTOP=m +CONFIG_ACPI_TOSHIBA=m +CONFIG_ACPI_VIDEO=m +# Disable in F9. 
+CONFIG_ACPI_PROC_EVENT=y +CONFIG_PNPACPI=y +CONFIG_ACPI_POWER_METER=m +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m + +# +# CPUFreq processor drivers +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEBUG=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=m +CONFIG_CPU_FREQ_STAT_DETAILS=y + +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_PCC_CPUFREQ=m +# CONFIG_X86_POWERNOW_K6 is not set +CONFIG_X86_POWERNOW_K7=y +CONFIG_X86_POWERNOW_K8=m +# CONFIG_X86_GX_SUSPMOD is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_SPEEDSTEP_ICH=y +CONFIG_X86_SPEEDSTEP_SMI=y +CONFIG_X86_SPEEDSTEP_LIB=y +# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set +CONFIG_X86_P4_CLOCKMOD=m +CONFIG_X86_LONGRUN=y +# CONFIG_X86_LONGHAUL is not set +# CONFIG_X86_CPUFREQ_NFORCE2 is not set +# e_powersaver is dangerous +# CONFIG_X86_E_POWERSAVER is not set + +CONFIG_X86_HT=y +CONFIG_X86_TRAMPOLINE=y + +# +# various x86 specific drivers +# +CONFIG_NVRAM=y +CONFIG_IBM_ASM=m +CONFIG_CRYPTO_AES_586=m +CONFIG_CRYPTO_TWOFISH_586=m +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m + +CONFIG_GENERIC_ISA_DMA=y +CONFIG_SCHED_SMT=y +CONFIG_SUSPEND=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" + +CONFIG_DEBUG_RODATA=y +CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_4KSTACKS is not set +CONFIG_DEBUG_NMI_TIMEOUT=5 + +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_BIOS=y + +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_COMPAQ=m +# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set +CONFIG_HOTPLUG_PCI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# SHPC has half-arsed PCI probing, which makes it load on too many systems +# CONFIG_HOTPLUG_PCI_SHPC is not set +CONFIG_PM=y + +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +CONFIG_IPW2200=m 
+CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y + +CONFIG_BLK_DEV_AMD74XX=y + +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m +CONFIG_I2C_SIS96X=m + +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +CONFIG_SCx200_ACB=m + +# CONFIG_X86_REBOOTFIXUPS is not set + +CONFIG_DELL_RBU=m +CONFIG_DCDBAS=m + +CONFIG_GPIO_SCH=m +CONFIG_PC8736x_GPIO=m +# CONFIG_NSC_GPIO is not set +CONFIG_CS5535_GPIO=m + +CONFIG_EDAC=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_MM_EDAC=m +CONFIG_EDAC_AMD76X=m +CONFIG_EDAC_E7XXX=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82860=m +CONFIG_EDAC_I82875P=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_R82600=m +CONFIG_EDAC_AMD8131=m +CONFIG_EDAC_AMD8111=m + +CONFIG_SCHED_MC=y + +CONFIG_SND_ISA=y +CONFIG_SND_ES18XX=m + +CONFIG_TCG_INFINEON=m + +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_GEODE=m +CONFIG_HW_RANDOM_VIA=m + + +# CONFIG_COMPAT_VDSO is not set + +# CONFIG_SGI_IOC4 is not set + +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ASUS_LAPTOP=m +CONFIG_COMPAL_LAPTOP=m +CONFIG_EEEPC_LAPTOP=m +CONFIG_EEEPC_WMI=m +CONFIG_FUJITSU_LAPTOP=m +# CONFIG_FUJITSU_LAPTOP_DEBUG is not set +CONFIG_MSI_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_DELL_LAPTOP=m +CONFIG_ACPI_WMI=m +CONFIG_ACER_WMI=m +CONFIG_ACERHDF=m +CONFIG_TC1100_WMI=m +CONFIG_HP_WMI=m +CONFIG_DELL_WMI=m + +# CONFIG_SMSC37B787_WDT is not set +CONFIG_W83697HF_WDT=m +CONFIG_IB700_WDT=m + +CONFIG_RELOCATABLE=y +CONFIG_PHYSICAL_ALIGN=0x400000 +CONFIG_PHYSICAL_START=0x400000 +CONFIG_CRASH_DUMP=y +# CONFIG_KEXEC_JUMP is not set +CONFIG_PROC_VMCORE=y +CONFIG_CRASH=m + +CONFIG_CRYPTO_DEV_GEODE=m + +CONFIG_VIDEO_CAFE_CCIC=m + 
+CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +CONFIG_LGUEST=m + +CONFIG_PARAVIRT_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set + +# PARAVIRT_SPINLOCKS has a 5% perf hit +# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_KVM_CLOCK=y +CONFIG_KVM_GUEST=y +CONFIG_LGUEST_GUEST=y +CONFIG_VMI=y + +CONFIG_XEN=y +CONFIG_XEN_MAX_DOMAIN_MEMORY=8 +CONFIG_XEN_BALLOON=y +CONFIG_XEN_SCRUB_PAGES=y +CONFIG_XEN_SAVE_RESTORE=y +CONFIG_HVC_XEN=y +CONFIG_XEN_FBDEV_FRONTEND=y +CONFIG_XEN_KBDDEV_FRONTEND=y +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y + +CONFIG_MTD_ESB2ROM=m +CONFIG_MTD_CK804XROM=m +CONFIG_MTD_NAND_CAFE=m + +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y + +CONFIG_THINKPAD_ACPI=m +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set + +CONFIG_MACINTOSH_DRIVERS=y + +CONFIG_DMIID=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m + +CONFIG_DMADEVICES=y +CONFIG_INTEL_IOATDMA=m + +CONFIG_SENSORS_I5K_AMB=m + +# CONFIG_CPA_DEBUG is not set +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set + +CONFIG_HP_WATCHDOG=m + +CONFIG_OLPC=y +CONFIG_BATTERY_OLPC=y +CONFIG_MOUSE_PS2_OLPC=y + +CONFIG_STRICT_DEVMEM=y + +# CONFIG_NO_BOOTMEM is not set + +# CONFIG_MEMTEST is not set +# CONFIG_MAXSMP is not set +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_SYSPROF_TRACER=y + +# CONFIG_X86_VERBOSE_BOOTUP is not set +# CONFIG_MMIOTRACE_TEST is not set + +# CONFIG_DEBUG_PER_CPU_MAPS is not set + +CONFIG_HP_ILO=m + +CONFIG_BACKLIGHT_MBP_NVIDIA=m + +CONFIG_OPROFILE_IBS=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y + +# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set 
+CONFIG_X86_RESERVE_LOW_64K=y + +# CONFIG_CMDLINE_BOOL is not set + +CONFIG_PANASONIC_LAPTOP=m + +CONFIG_XEN_DEBUG_FS=y +CONFIG_X86_PTRACE_BTS=y + +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y + +CONFIG_POWER_TRACER=y +CONFIG_HW_BRANCH_TRACER=y + +# CONFIG_SPARSE_IRQ is not set + +CONFIG_RCU_FANOUT=32 + +# CONFIG_IOMMU_STRESS is not set + +CONFIG_PERF_COUNTERS=y +CONFIG_PERF_EVENTS=y +CONFIG_EVENT_PROFILE=y + +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +# CONFIG_X86_ANCIENT_MCE is not set +# CONFIG_X86_MCE_INJECT is not set + +# CONFIG_X86_MRST is not set +CONFIG_SFI=y + +CONFIG_INPUT_WINBOND_CIR=m +CONFIG_I2C_SCMI=m +CONFIG_SBC_FITPC2_WATCHDOG=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_DECODE_MCE=m + +CONFIG_GPIO_LANGWELL=y + +# CONFIG_INTEL_TXT is not set + +CONFIG_CS5535_MFGPT=m +CONFIG_GEODE_WDT=m +CONFIG_CS5535_CLOCK_EVENT_SRC=m + +CONFIG_LEDS_INTEL_SS4200=m + +CONFIG_X86_DECODER_SELFTEST=y + +CONFIG_ACPI_CMPC=m +CONFIG_MSI_WMI=m +CONFIG_TOSHIBA_BT_RFKILL=m +# CONFIG_SAMSUNG_LAPTOP is not set + +CONFIG_VGA_SWITCHEROO=y +CONFIG_LPC_SCH=m diff --git a/config-x86_64-generic b/config-x86_64-generic new file mode 100644 index 000000000..410cb7ffe --- /dev/null +++ b/config-x86_64-generic @@ -0,0 +1,397 @@ +CONFIG_64BIT=y +CONFIG_UID16=y +# CONFIG_KERNEL_LZMA is not set + +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_VSMP is not set +# CONFIG_X86_UV is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_MTRR=y +CONFIG_NUMA=y +CONFIG_K8_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +# CONFIG_NUMA_EMU is not set +CONFIG_NR_CPUS=256 +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_P4_CLOCKMOD=m +CONFIG_IA32_EMULATION=y +# CONFIG_IA32_AOUT is not set +# CONFIG_IOMMU_DEBUG is not set +CONFIG_DEBUG_RODATA=y +CONFIG_MICROCODE=m +CONFIG_SWIOTLB=y +CONFIG_CALGARY_IOMMU=y +CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y +CONFIG_X86_PM_TIMER=y +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_PCI_BIOS=y +CONFIG_PCI_MMCONFIG=y 
+CONFIG_DMAR=y +CONFIG_DMAR_BROKEN_GFX_WA=y +CONFIG_DMAR_FLOPPY_WA=y +CONFIG_DMAR_DEFAULT_ON=y + +CONFIG_KEXEC_JUMP=y + +CONFIG_EFI=y +CONFIG_EFI_VARS=y +CONFIG_EFI_PCDP=y +CONFIG_FB_EFI=y + +CONFIG_SECCOMP=y + +CONFIG_CAPI_EICON=y + +CONFIG_GENERIC_ISA_DMA=y +CONFIG_SCHED_SMT=y +CONFIG_SUSPEND=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" + +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_DEBUG=y +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_PCC_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_CPU_FREQ_STAT=m +CONFIG_CPU_FREQ_STAT_DETAILS=y + +CONFIG_ACPI=y +CONFIG_ACPI_AC=y +# CONFIG_ACPI_ASUS is not set +CONFIG_ACPI_PROCFS_POWER=y +CONFIG_ACPI_SYSFS_POWER=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BLACKLIST_YEAR=0 +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_CONTAINER=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_FAN=y +CONFIG_ACPI_HOTPLUG_MEMORY=m +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_PROCFS=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_TOSHIBA=m +CONFIG_ACPI_POWER=y +CONFIG_ACPI_VIDEO=m +# Disable in F9. 
+CONFIG_ACPI_PROC_EVENT=y +CONFIG_ACPI_POWER_METER=m +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m + +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ASUS_LAPTOP=m +CONFIG_COMPAL_LAPTOP=m +CONFIG_FUJITSU_LAPTOP=m +# CONFIG_FUJITSU_LAPTOP_DEBUG is not set +CONFIG_MSI_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_SONYPI_COMPAT=y +CONFIG_EEEPC_LAPTOP=m +CONFIG_EEEPC_WMI=m +CONFIG_DELL_LAPTOP=m +CONFIG_ACPI_WMI=m +CONFIG_ACER_WMI=m +CONFIG_ACERHDF=m +CONFIG_HP_WMI=m +CONFIG_DELL_WMI=m + +CONFIG_THINKPAD_ACPI=m +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set + +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_COMPAQ=m +# CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM is not set +CONFIG_HOTPLUG_PCI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# SHPC has half-arsed PCI probing, which makes it load on too many systems +CONFIG_HOTPLUG_PCI_SHPC=m + +CONFIG_HPET=y +# CONFIG_HPET_MMAP is not set +CONFIG_PM=y + +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +CONFIG_IPW2200=m +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y + +CONFIG_PNP=y +CONFIG_PNPACPI=y + +CONFIG_BLK_DEV_AMD74XX=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_AES is not set +CONFIG_CRYPTO_AES_X86_64=m +# CONFIG_CRYPTO_TWOFISH is not set +CONFIG_CRYPTO_TWOFISH_X86_64=m +# CONFIG_CRYPTO_SALSA20 is not set +CONFIG_CRYPTO_SALSA20_X86_64=m +CONFIG_CRYPTO_AES_NI_INTEL=m + +CONFIG_X86_MCE=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y + +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_NFORCE2_S4985=m +CONFIG_I2C_PIIX4=m +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set + 
+CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +CONFIG_DELL_RBU=m +CONFIG_DCDBAS=m + +CONFIG_NVRAM=y + +CONFIG_EDAC=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_MM_EDAC=m +CONFIG_EDAC_AMD76X=m +CONFIG_EDAC_E7XXX=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I82875P=m +CONFIG_EDAC_I82860=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_R82600=m +CONFIG_EDAC_AMD8131=m +CONFIG_EDAC_AMD8111=m +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_DECODE_MCE=m + +CONFIG_SCHED_MC=y + +CONFIG_TCG_INFINEON=m + +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_VIA=m + +# CONFIG_HW_RANDOM_GEODE is not set + + +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_DEBUG_NMI_TIMEOUT=5 + +CONFIG_GPIO_SCH=m +# CONFIG_PC8736x_GPIO is not set + +# CONFIG_DISCONTIGMEM_MANUAL is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y + +# CONFIG_BLK_DEV_CMD640 is not set +# CONFIG_BLK_DEV_RZ1000 is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_CS5520 is not set +# CONFIG_BLK_DEV_CS5530 is not set +# CONFIG_BLK_DEV_CS5535 is not set + +CONFIG_CC_STACKPROTECTOR=y + +CONFIG_SGI_IOC4=m +CONFIG_SGI_XP=m +CONFIG_SGI_GRU=m +# CONFIG_SGI_GRU_DEBUG is not set + +# CONFIG_SMSC37B787_WDT is not set +CONFIG_W83697HF_WDT=m + +# CONFIG_VIDEO_CAFE_CCIC is not set + +CONFIG_MTD_ESB2ROM=m +CONFIG_MTD_CK804XROM=m + +CONFIG_RELOCATABLE=y +CONFIG_MACINTOSH_DRIVERS=y + +CONFIG_CRASH_DUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_PROC_VMCORE=y +CONFIG_CRASH=m + +CONFIG_DMIID=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m + + +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y + +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m + +CONFIG_PARAVIRT_GUEST=y 
+CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +# PARAVIRT_SPINLOCKS has a 5% perf hit +# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_KVM_CLOCK=y +CONFIG_KVM_GUEST=y + +CONFIG_XEN=y +CONFIG_XEN_MAX_DOMAIN_MEMORY=32 +CONFIG_XEN_BALLOON=y +CONFIG_XEN_SCRUB_PAGES=y +CONFIG_XEN_SAVE_RESTORE=y +CONFIG_HVC_XEN=y +CONFIG_XEN_FBDEV_FRONTEND=y +CONFIG_XEN_KBDDEV_FRONTEND=y +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_DEV_EVTCHN=m +CONFIG_XEN_SYS_HYPERVISOR=y + +CONFIG_DMADEVICES=y +CONFIG_INTEL_IOATDMA=m + +CONFIG_SENSORS_I5K_AMB=m + +# CONFIG_COMPAT_VDSO is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_CPA_DEBUG is not set + +CONFIG_HP_WATCHDOG=m + +CONFIG_FRAME_WARN=2048 + +CONFIG_NODES_SHIFT=9 +CONFIG_X86_PAT=y +# FIXME: These should be 32bit only +# CONFIG_FB_N411 is not set +CONFIG_STRICT_DEVMEM=y + +CONFIG_DIRECT_GBPAGES=y + +# CONFIG_NO_BOOTMEM is not set + +# CONFIG_MEMTEST is not set +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_STATS=y +# CONFIG_MAXSMP is not set +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_SYSPROF_TRACER=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +# CONFIG_MMIOTRACE_TEST is not set + +CONFIG_X86_MPPARSE=y + +CONFIG_BACKLIGHT_MBP_NVIDIA=m + +CONFIG_OPROFILE_IBS=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y + +# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set +CONFIG_X86_RESERVE_LOW_64K=y + +# CONFIG_CMDLINE_BOOL is not set + +CONFIG_PANASONIC_LAPTOP=m + +CONFIG_XEN_DEBUG_FS=y +CONFIG_X86_PTRACE_BTS=y + +CONFIG_I7300_IDLE=m +CONFIG_INTR_REMAP=y + +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y + +CONFIG_POWER_TRACER=y +CONFIG_HW_BRANCH_TRACER=y + +CONFIG_X86_X2APIC=y +CONFIG_SPARSE_IRQ=y + +CONFIG_RCU_FANOUT=64 + +# CONFIG_IOMMU_STRESS is not set + +CONFIG_PERF_COUNTERS=y +CONFIG_PERF_EVENTS=y +CONFIG_EVENT_PROFILE=y + +# CONFIG_X86_MCE_INJECT is not set + 
+CONFIG_SFI=y +CONFIG_INPUT_WINBOND_CIR=m +CONFIG_I2C_SCMI=m +CONFIG_SBC_FITPC2_WATCHDOG=m +CONFIG_EDAC_I3200=m +CONFIG_TOPSTAR_LAPTOP=m +CONFIG_INTEL_TXT=y +CONFIG_GPIO_LANGWELL=y + +CONFIG_FUNCTION_GRAPH_TRACER=y + +CONFIG_ACPI_CMPC=m +CONFIG_MSI_WMI=m +CONFIG_TOSHIBA_BT_RFKILL=m +# CONFIG_SAMSUNG_LAPTOP is not set + +CONFIG_CS5535_MFGPT=m +CONFIG_GEODE_WDT=m +CONFIG_CS5535_CLOCK_EVENT_SRC=m + +CONFIG_X86_DECODER_SELFTEST=y + +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +CONFIG_VGA_SWITCHEROO=y +CONFIG_LPC_SCH=m diff --git a/coredump-uid-pipe-check.patch b/coredump-uid-pipe-check.patch new file mode 100644 index 000000000..4b98a07ab --- /dev/null +++ b/coredump-uid-pipe-check.patch @@ -0,0 +1,14 @@ +diff -up linux-2.6.32.noarch/fs/exec.c.orig linux-2.6.32.noarch/fs/exec.c +--- linux-2.6.32.noarch/fs/exec.c.orig 2010-02-22 12:40:06.000000000 -0500 ++++ linux-2.6.32.noarch/fs/exec.c 2010-02-22 12:48:34.000000000 -0500 +@@ -1973,8 +1973,9 @@ void do_coredump(long signr, int exit_co + /* + * Dont allow local users get cute and trick others to coredump + * into their pre-created files: ++ * Note this isn't relevant to pipes + */ +- if (inode->i_uid != current_fsuid()) ++ if (!ispipe && (inode->i_uid != current_fsuid())) + goto close_fail; + if (!cprm.file->f_op) + goto close_fail; diff --git a/crypto-add-async-hash-testing.patch b/crypto-add-async-hash-testing.patch new file mode 100644 index 000000000..8df0ad44f --- /dev/null +++ b/crypto-add-async-hash-testing.patch @@ -0,0 +1,111 @@ +From e45009229be6a7fae49bdfa3459905668c0b0fb1 Mon Sep 17 00:00:00 2001 +From: David S. Miller +Date: Wed, 19 May 2010 14:12:03 +1000 +Subject: crypto: testmgr - Add testing for async hashing and update/final + +Extend testmgr such that it tests async hash algorithms, +and that for both sync and async hashes it tests both +->digest() and ->update()/->final() sequences. + +Signed-off-by: David S. 
Miller +Signed-off-by: Herbert Xu +--- + crypto/testmgr.c | 66 +++++++++++++++++++++++++++++++++++++++-------------- + 1 files changed, 48 insertions(+), 18 deletions(-) + +diff --git a/crypto/testmgr.c b/crypto/testmgr.c +index c494d76..5c8aaa0 100644 +--- a/crypto/testmgr.c ++++ b/crypto/testmgr.c +@@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) + free_page((unsigned long)buf[i]); + } + ++static int do_one_async_hash_op(struct ahash_request *req, ++ struct tcrypt_result *tr, ++ int ret) ++{ ++ if (ret == -EINPROGRESS || ret == -EBUSY) { ++ ret = wait_for_completion_interruptible(&tr->completion); ++ if (!ret) ++ ret = tr->err; ++ INIT_COMPLETION(tr->completion); ++ } ++ return ret; ++} ++ + static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, +- unsigned int tcount) ++ unsigned int tcount, bool use_digest) + { + const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); + unsigned int i, j, k, temp; +@@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, + } + + ahash_request_set_crypt(req, sg, result, template[i].psize); +- ret = crypto_ahash_digest(req); +- switch (ret) { +- case 0: +- break; +- case -EINPROGRESS: +- case -EBUSY: +- ret = wait_for_completion_interruptible( +- &tresult.completion); +- if (!ret && !(ret = tresult.err)) { +- INIT_COMPLETION(tresult.completion); +- break; ++ if (use_digest) { ++ ret = do_one_async_hash_op(req, &tresult, ++ crypto_ahash_digest(req)); ++ if (ret) { ++ pr_err("alg: hash: digest failed on test %d " ++ "for %s: ret=%d\n", j, algo, -ret); ++ goto out; ++ } ++ } else { ++ ret = do_one_async_hash_op(req, &tresult, ++ crypto_ahash_init(req)); ++ if (ret) { ++ pr_err("alt: hash: init failed on test %d " ++ "for %s: ret=%d\n", j, algo, -ret); ++ goto out; ++ } ++ ret = do_one_async_hash_op(req, &tresult, ++ crypto_ahash_update(req)); ++ if (ret) { ++ pr_err("alt: hash: update failed on test %d " ++ "for %s: ret=%d\n", j, 
algo, -ret); ++ goto out; ++ } ++ ret = do_one_async_hash_op(req, &tresult, ++ crypto_ahash_final(req)); ++ if (ret) { ++ pr_err("alt: hash: final failed on test %d " ++ "for %s: ret=%d\n", j, algo, -ret); ++ goto out; + } +- /* fall through */ +- default: +- printk(KERN_ERR "alg: hash: digest failed on test %d " +- "for %s: ret=%d\n", j, algo, -ret); +- goto out; + } + + if (memcmp(result, template[i].digest, +@@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, + return PTR_ERR(tfm); + } + +- err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count); ++ err = test_hash(tfm, desc->suite.hash.vecs, ++ desc->suite.hash.count, true); ++ if (!err) ++ err = test_hash(tfm, desc->suite.hash.vecs, ++ desc->suite.hash.count, false); + + crypto_free_ahash(tfm); + return err; +-- +1.7.0.1 + diff --git a/die-floppy-die.patch b/die-floppy-die.patch new file mode 100644 index 000000000..76db31218 --- /dev/null +++ b/die-floppy-die.patch @@ -0,0 +1,30 @@ +From 4ff58b642f80dedb20533978123d89b5ac9b1ed5 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Tue, 30 Mar 2010 00:04:29 -0400 +Subject: die-floppy-die + +Kill the floppy.ko pnp modalias. We were surviving just fine without +autoloading floppy drivers, tyvm. + +Please feel free to register all complaints in the wastepaper bin. 
+--- + drivers/block/floppy.c | 3 +-- + 1 files changed, 1 insertions(+), 2 deletions(-) + +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c +index 90c4038..f4a0b90 100644 +--- a/drivers/block/floppy.c ++++ b/drivers/block/floppy.c +@@ -4619,8 +4619,7 @@ static const struct pnp_device_id floppy_pnpids[] = { + {"PNP0700", 0}, + {} + }; +- +-MODULE_DEVICE_TABLE(pnp, floppy_pnpids); ++/* MODULE_DEVICE_TABLE(pnp, floppy_pnpids); */ + + #else + +-- +1.7.0.1 + diff --git a/disable-i8042-check-on-apple-mac.patch b/disable-i8042-check-on-apple-mac.patch new file mode 100644 index 000000000..f99d0f900 --- /dev/null +++ b/disable-i8042-check-on-apple-mac.patch @@ -0,0 +1,59 @@ +From 2a79554c864ac58fa2ad982f0fcee2cc2aa33eb5 Mon Sep 17 00:00:00 2001 +From: Bastien Nocera +Date: Thu, 20 May 2010 10:30:31 -0400 +Subject: Disable i8042 checks on Intel Apple Macs + +As those computers never had any i8042 controllers, and the +current lookup code could potentially lock up/hang/wait for +timeout for long periods of time. 
+ +Fixes intermittent hangs on boot on a MacbookAir1,1 + +Signed-off-by: Bastien Nocera +--- + drivers/input/serio/i8042.c | 22 ++++++++++++++++++++++ + 1 files changed, 22 insertions(+), 0 deletions(-) + +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index 6440a8f..4d7cf98 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -1451,6 +1451,22 @@ static struct platform_driver i8042_driver = { + .shutdown = i8042_shutdown, + }; + ++#ifdef CONFIG_DMI ++static struct dmi_system_id __initdata dmi_system_table[] = { ++ { ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "Apple Computer, Inc.") ++ }, ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "Apple Inc.") ++ }, ++ }, ++ {} ++}; ++#endif /*CONFIG_DMI*/ ++ + static int __init i8042_init(void) + { + struct platform_device *pdev; +@@ -1458,6 +1474,12 @@ static int __init i8042_init(void) + + dbg_init(); + ++#ifdef CONFIG_DMI ++ /* Intel Apple Macs never have an i8042 controller */ ++ if (dmi_check_system(dmi_system_table) > 0) ++ return -ENODEV; ++#endif /*CONFIG_DMI*/ ++ + err = i8042_platform_init(); + if (err) + return err; +-- +1.7.0.1 + diff --git a/drm-encoder-disable.patch b/drm-encoder-disable.patch new file mode 100644 index 000000000..8c8c7cb45 --- /dev/null +++ b/drm-encoder-disable.patch @@ -0,0 +1,74 @@ +From 0b91f360956aa7a5aa8900d358d1bff3020182e0 Mon Sep 17 00:00:00 2001 +From: Ben Skeggs +Date: Thu, 1 Jul 2010 12:34:56 +1000 +Subject: [PATCH 1/2] drm: disable encoder rather than dpms off in drm_crtc_prepare_encoders() + +Original behaviour will be preserved for drivers that don't implement +disable() hooks for an encoder. 
+ +Signed-off-by: Ben Skeggs +--- + drivers/gpu/drm/drm_crtc_helper.c | 22 ++++++++++++++-------- + 1 files changed, 14 insertions(+), 8 deletions(-) + +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c +index b142ac2..32dae0e 100644 +--- a/drivers/gpu/drm/drm_crtc_helper.c ++++ b/drivers/gpu/drm/drm_crtc_helper.c +@@ -201,6 +201,17 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) + } + EXPORT_SYMBOL(drm_helper_crtc_in_use); + ++static void ++drm_encoder_disable(struct drm_encoder *encoder) ++{ ++ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ++ ++ if (encoder_funcs->disable) ++ (*encoder_funcs->disable)(encoder); ++ else ++ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); ++} ++ + /** + * drm_helper_disable_unused_functions - disable unused objects + * @dev: DRM device +@@ -215,7 +226,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) + { + struct drm_encoder *encoder; + struct drm_connector *connector; +- struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_crtc *crtc; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +@@ -226,12 +236,8 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +- encoder_funcs = encoder->helper_private; + if (!drm_helper_encoder_in_use(encoder)) { +- if (encoder_funcs->disable) +- (*encoder_funcs->disable)(encoder); +- else +- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); ++ drm_encoder_disable(encoder); + /* disconnector encoder from any connector */ + encoder->crtc = NULL; + } +@@ -292,11 +298,11 @@ drm_crtc_prepare_encoders(struct drm_device *dev) + encoder_funcs = encoder->helper_private; + /* Disable unused encoders */ + if (encoder->crtc == NULL) +- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); ++ drm_encoder_disable(encoder); + /* Disable encoders whose CRTC is about to change */ + if 
(encoder_funcs->get_crtc && + encoder->crtc != (*encoder_funcs->get_crtc)(encoder)) +- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); ++ drm_encoder_disable(encoder); + } + } + +-- +1.7.1.1 + diff --git a/drm-i915-add-reclaimable-to-page-allocations.patch b/drm-i915-add-reclaimable-to-page-allocations.patch new file mode 100644 index 000000000..6014f2c15 --- /dev/null +++ b/drm-i915-add-reclaimable-to-page-allocations.patch @@ -0,0 +1,48 @@ +From: Linus Torvalds +Date: Sun, 18 Jul 2010 16:44:37 +0000 (-0700) +Subject: drm/i915: add 'reclaimable' to i915 self-reclaimable page allocations +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=cd9f040df6ce46573760a507cb88192d05d27d86 + +drm/i915: add 'reclaimable' to i915 self-reclaimable page allocations + +The hibernate issues that got fixed in commit 985b823b9192 ("drm/i915: +fix hibernation since i915 self-reclaim fixes") turn out to have been +incomplete. Vefa Bicakci tested lots of hibernate cycles, and without +the __GFP_RECLAIMABLE flag the system eventually fails to resume. + +With the flag added, Vefa can apparently hibernate forever (or until he +gets bored running his automated scripts, whichever comes first). + +The reclaimable flag was there originally, and was one of the flags that +were dropped (unintentionally) by commit 4bdadb978569 ("drm/i915: +Selectively enable self-reclaim") that introduced all these problems, +but I didn't want to just blindly add back all the flags in commit +985b823b9192, and it looked like __GFP_RECLAIM wasn't necessary. It +clearly was. + +I still suspect that there is some subtle reason we're missing that +causes the problems, but __GFP_RECLAIMABLE is certainly not wrong to use +in this context, and is what the code historically used. And we have no +idea what the causes the corruption without it. + +Reported-and-tested-by: M. 
Vefa Bicakci +Cc: Dave Airlie +Cc: Chris Wilson +Cc: KOSAKI Motohiro +Cc: Hugh Dickins +Cc: stable@kernel.org +Signed-off-by: Linus Torvalds +--- + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 0743858..8757ecf 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2241,6 +2241,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, + page = read_cache_page_gfp(mapping, i, + GFP_HIGHUSER | + __GFP_COLD | ++ __GFP_RECLAIMABLE | + gfpmask); + if (IS_ERR(page)) + goto err_pages; diff --git a/drm-i915-fix-edp-panels.patch b/drm-i915-fix-edp-panels.patch new file mode 100644 index 000000000..01d3edd1b --- /dev/null +++ b/drm-i915-fix-edp-panels.patch @@ -0,0 +1,12 @@ +diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c.dave linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c +--- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c.dave 2010-06-25 16:30:13.000000000 +1000 ++++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c 2010-06-25 16:30:23.000000000 +1000 +@@ -129,7 +129,7 @@ intel_dp_link_required(struct drm_device + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_eDP(intel_encoder)) +- return (pixel_clock * dev_priv->edp_bpp) / 8; ++ return (pixel_clock * ALIGN(dev_priv->edp_bpp, 8)) / 8; + else + return pixel_clock * 3; + } diff --git a/drm-i915-fix-hibernate-memory-corruption.patch b/drm-i915-fix-hibernate-memory-corruption.patch new file mode 100644 index 000000000..a9c2c18eb --- /dev/null +++ b/drm-i915-fix-hibernate-memory-corruption.patch @@ -0,0 +1,41 @@ +From: Linus Torvalds +Date: Fri, 2 Jul 2010 00:04:42 +0000 (+1000) +Subject: drm/i915: fix hibernation since i915 self-reclaim fixes +X-Git-Tag: v2.6.35-rc4~13 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=985b823b919273fe1327d56d2196b4f92e5d0fae + +drm/i915: fix hibernation since i915 self-reclaim fixes + +Since commit 
4bdadb9785696439c6e2b3efe34aa76df1149c83 ("drm/i915: +Selectively enable self-reclaim"), we've been passing GFP_MOVABLE to the +i915 page allocator where we weren't before due to some over-eager +removal of the page mapping gfp_flags games the code used to play. + +This caused hibernate on Intel hardware to result in a lot of memory +corruptions on resume. See for example + + http://bugzilla.kernel.org/show_bug.cgi?id=13811 + +Reported-by: Evengi Golov (in bugzilla) +Signed-off-by: Dave Airlie +Tested-by: M. Vefa Bicakci +Cc: stable@kernel.org +Cc: Chris Wilson +Cc: KOSAKI Motohiro +Cc: Hugh Dickins +Signed-off-by: Linus Torvalds +--- + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 9ded3da..0743858 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2239,7 +2239,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, + mapping = inode->i_mapping; + for (i = 0; i < page_count; i++) { + page = read_cache_page_gfp(mapping, i, +- mapping_gfp_mask (mapping) | ++ GFP_HIGHUSER | + __GFP_COLD | + gfpmask); + if (IS_ERR(page)) diff --git a/drm-i915-make-G4X-style-PLL-search-more-permissive.patch b/drm-i915-make-G4X-style-PLL-search-more-permissive.patch new file mode 100644 index 000000000..b7d863076 --- /dev/null +++ b/drm-i915-make-G4X-style-PLL-search-more-permissive.patch @@ -0,0 +1,51 @@ +drm/i915: Make G4X-style PLL search more permissive + +Fixes an Ironlake laptop with a 68.940MHz 1280x800 panel and 120MHz SSC +reference clock. + +More generally, the 0.488% tolerance used before is just too tight to +reliably find a PLL setting. 
I extracted the search algorithm and +modified it to find the dot clocks with maximum error over the valid +range for the given output type: + +http://people.freedesktop.org/~ajax/intel_g4x_find_best_pll.c + +This gave: + +Worst dotclock for Ironlake DAC refclk is 350000kHz (error 0.00571) +Worst dotclock for Ironlake SL-LVDS refclk is 102321kHz (error 0.00524) +Worst dotclock for Ironlake DL-LVDS refclk is 219642kHz (error 0.00488) +Worst dotclock for Ironlake SL-LVDS SSC refclk is 84374kHz (error 0.00529) +Worst dotclock for Ironlake DL-LVDS SSC refclk is 183035kHz (error 0.00488) +Worst dotclock for G4X SDVO refclk is 50000kHz (error 0.17332) +Worst dotclock for G4X HDMI refclk is 334400kHz (error 0.00478) +Worst dotclock for G4X SL-LVDS refclk is 95571kHz (error 0.00449) +Worst dotclock for G4X DL-LVDS refclk is 224000kHz (error 0.00510) + +The SDVO number looks a bit suspicious, which I haven't tracked down +yet. But it's clear that the old threshold is too tight. + +Signed-off-by: Adam Jackson +[ RHBZ #572799 ] +--- + drivers/gpu/drm/i915/intel_display.c | 4 ++-- + 1 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index a8d65b7..4b17722 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -862,8 +862,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + intel_clock_t clock; + int max_n; + bool found; +- /* approximately equals target * 0.00488 */ +- int err_most = (target >> 8) + (target >> 10); ++ /* approximately equals target * 0.00585 */ ++ int err_most = (target >> 8) + (target >> 9); + found = false; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { +-- +1.7.1 + diff --git a/drm-intel-945gm-stability-fixes.patch b/drm-intel-945gm-stability-fixes.patch new file mode 100644 index 000000000..135d108c5 --- /dev/null +++ b/drm-intel-945gm-stability-fixes.patch @@ -0,0 +1,102 @@ +upstream 
commit 944001201ca0196bcdb088129e5866a9f379d08c +(plus some defines) + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 0d05c6f..b87f65d 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -4967,6 +4967,16 @@ i915_gem_load(struct drm_device *dev) + list_add(&dev_priv->mm.shrink_list, &shrink_list); + spin_unlock(&shrink_list_lock); + ++ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ ++ if (IS_GEN3(dev)) { ++ u32 tmp = I915_READ(MI_ARB_STATE); ++ if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { ++ /* arb state is a masked write, so set bit + bit in mask */ ++ tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); ++ I915_WRITE(MI_ARB_STATE, tmp); ++ } ++ } ++ + /* Old X drivers will take 0-2 for front, back, depth buffers */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + dev_priv->fence_reg_start = 3; +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 4cbc521..4543975 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -357,6 +357,70 @@ + #define LM_BURST_LENGTH 0x00000700 + #define LM_FIFO_WATERMARK 0x0000001F + #define MI_ARB_STATE 0x020e4 /* 915+ only */ ++#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */ ++ ++/* Make render/texture TLB fetches lower priorty than associated data ++ * fetches. This is not turned on by default ++ */ ++#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) ++ ++/* Isoch request wait on GTT enable (Display A/B/C streams). ++ * Make isoch requests stall on the TLB update. May cause ++ * display underruns (test mode only) ++ */ ++#define MI_ARB_ISOCH_WAIT_GTT (1 << 14) ++ ++/* Block grant count for isoch requests when block count is ++ * set to a finite value. 
++ */ ++#define MI_ARB_BLOCK_GRANT_MASK (3 << 12) ++#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ ++#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ ++#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ ++#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ ++ ++/* Enable render writes to complete in C2/C3/C4 power states. ++ * If this isn't enabled, render writes are prevented in low ++ * power states. That seems bad to me. ++ */ ++#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) ++ ++/* This acknowledges an async flip immediately instead ++ * of waiting for 2TLB fetches. ++ */ ++#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) ++ ++/* Enables non-sequential data reads through arbiter ++ */ ++#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) ++ ++/* Disable FSB snooping of cacheable write cycles from binner/render ++ * command stream ++ */ ++#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) ++ ++/* Arbiter time slice for non-isoch streams */ ++#define MI_ARB_TIME_SLICE_MASK (7 << 5) ++#define MI_ARB_TIME_SLICE_1 (0 << 5) ++#define MI_ARB_TIME_SLICE_2 (1 << 5) ++#define MI_ARB_TIME_SLICE_4 (2 << 5) ++#define MI_ARB_TIME_SLICE_6 (3 << 5) ++#define MI_ARB_TIME_SLICE_8 (4 << 5) ++#define MI_ARB_TIME_SLICE_10 (5 << 5) ++#define MI_ARB_TIME_SLICE_14 (6 << 5) ++#define MI_ARB_TIME_SLICE_16 (7 << 5) ++ ++/* Low priority grace period page size */ ++#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ ++#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) ++ ++/* Disable display A/B trickle feed */ ++#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) ++ ++/* Set display plane priority */ ++#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ ++#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ ++ + #define CACHE_MODE_0 0x02120 /* 915+ only */ + #define CM0_MASK_SHIFT 16 + #define CM0_IZ_OPT_DISABLE (1<<6) +-- +1.7.1 + diff --git a/drm-intel-big-hammer.patch b/drm-intel-big-hammer.patch new 
file mode 100644 index 000000000..63dc016b1 --- /dev/null +++ b/drm-intel-big-hammer.patch @@ -0,0 +1,16 @@ +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 37427e4..08af9db 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2553,6 +2553,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, + + mutex_lock(&dev->struct_mutex); + ++ /* We don't get the flushing right for these chipsets, use the ++ * big hamer for now to avoid random crashiness. */ ++ if (IS_I85X(dev) || IS_I865G(dev)) ++ wbinvd(); ++ + i915_verify_inactive(dev, __FILE__, __LINE__); + + if (dev_priv->mm.wedged) { diff --git a/drm-intel-make-lvds-work.patch b/drm-intel-make-lvds-work.patch new file mode 100644 index 000000000..5ca0152da --- /dev/null +++ b/drm-intel-make-lvds-work.patch @@ -0,0 +1,19 @@ +diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c +--- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig 2010-03-31 16:59:39.901995671 -0400 ++++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c 2010-03-31 17:01:05.416996744 -0400 +@@ -3757,7 +3757,6 @@ struct drm_crtc *intel_get_load_detect_p + void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) + { + struct drm_encoder *encoder = &intel_encoder->enc; +- struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc = encoder->crtc; + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; +@@ -3767,7 +3766,6 @@ void intel_release_load_detect_pipe(stru + intel_encoder->base.encoder = NULL; + intel_encoder->load_detect_temp = false; + crtc->enabled = drm_helper_crtc_in_use(crtc); +- drm_helper_disable_unused_functions(dev); + } + + /* Switch crtc and output back off if necessary */ diff --git a/drm-intel-next.patch b/drm-intel-next.patch new file mode 
100644 index 000000000..c6cac6926 --- /dev/null +++ b/drm-intel-next.patch @@ -0,0 +1 @@ +empty diff --git a/drm-next.patch b/drm-next.patch new file mode 100644 index 000000000..f9a5b7932 --- /dev/null +++ b/drm-next.patch @@ -0,0 +1,44474 @@ +commit a77889d1b091dd6783db3e1b059cc378d37f9982 +Author: Kyle McMartin +Date: Wed Jun 16 15:06:54 2010 +0100 + + provide a knob to {en,dis}able radeon_pm + +commit ef24e3e0e644621e2c98d38f27f4b25d23875256 +Author: Kyle McMartin +Date: Wed Jun 16 14:51:26 2010 +0100 + + Merge local branch 'drm-since-1067b6c' + + % git log --no-merges --oneline 1067b6c..v2.6.35-rc3 -- drivers/gpu/drm + 8d86dc6 Revert "drm/i915: Don't enable pipe/plane/VCO early (wait for DPMS on)." + b62e948 drm/radeon: don't poll tv dac if crtc2 is in use. + d294ed6 drm/radeon: reset i2c valid to avoid incorrect tv-out polling. + 4eb3033 drm/nv50: fix iommu errors caused by device reading from address 0 + 7504794 drm/nouveau: off by one in init_i2c_device_find() + 55a4c5c nouveau: off by one in nv50_gpio_location() + 6d69630 drm/nouveau: completely fail init if we fail to map the PRAMIN BAR + 1eb3810 drm/nouveau: match U/DP script against SOR link + f712d0c drm/radeon/kms/pm: resurrect printing power states + 0fcbe94 drm/radeon/kms: add trivial debugging for voltage + a081a9d drm/radeon/kms/r600+: use voltage from requested clock mode (v3) + 4d60173 drm/radeon/kms/pm: track current voltage (v2) + aa1df0f drm/radeon/kms/pm: Disable voltage adjust on RS780/RS880 + cbd4623 drm/radeon/kms: fix typo in printing the HPD info + c9e75b2 drm/radeon/kms/pm: add mid profile + f8ed8b4 drm/radeon/kms/pm: Misc fixes + 8de016e drm/radeon/kms/combios: fix typo in voltage fix + 148a03b drm/radeon/kms/evergreen: set accel_enabled + 9b8eb4d drm/vmwgfx: return -EFAULT for copy_to_user errors + e902a35 drm/drm_crtc: return -EFAULT on copy_to_user errors + fc2362a drm/fb: use printk to print out the switching to text mode error. + 9bad145 drm/radeon: fix PM on non-vram cards. 
+ 5a79395 drm: Propagate error from drm_fb_helper_init(). + a3524f1 drm/i915: fix oops on single crtc devices. + e7b526b drm/i915: Move non-phys cursors into the GTT + +commit e1442526a8b1b9a0ffd3f8778d2ff40597ef4662 +Author: Kyle McMartin +Date: Mon May 31 12:38:09 2010 +0100 + + nouveau is not in staging on Fedora + +commit fcd86a22bc88817a417185602e90451a3c5a25b8 +Author: Linus Torvalds +Date: Thu Jun 3 07:19:45 2010 -0700 + + Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6 + + * 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (41 commits) + % git log --oneline --no-merges a652883..1067b6c + d8dcaa1 drm/radeon/kms: make sure display hw is disabled when suspending + d8bd19d drm/vmwgfx: Allow userspace to change default layout. Bump minor. + 991b7b4 drm/vmwgfx: Fix framebuffer modesetting + 7c4f778 drm/vmwgfx: Fix vga save / restore with display topology. + c0db9cb vgaarb: use MIT license + 2d6e9b9 vgaarb: convert pr_devel() to pr_debug() + ce04cc0 drm: fix typos in Linux DRM Developer's Guide + 84d88f4 drm/radeon/kms/pm: voltage fixes + 9264587 drm/radeon/kms/pm: radeon_set_power_state fixes + c5e8ce6 drm/radeon/kms/pm: patch default power state with default clocks/voltages on r6xx+ + 9349d5c drm/radeon/kms/pm: enable SetVoltage on r7xx/evergreen + 7ac9aa5 drm/radeon/kms/pm: add support for SetVoltage cmd table (V2) + cb5fcbd drm/radeon/kms/evergreen: add initial CS parser + fbf8176 drm/kms: disable/enable poll around switcheroo on/off + fc5ea29 drm/nouveau: fixup confusion over which handle the DSM is hanging off. + afeb3e1 drm/nouveau: attempt to get bios from ACPI v3 + 8b281db drm/nv50: cast IGP memory location to u64 before shifting + 4abe438 drm/ttm: Fix ttm_page_alloc.c + e8613c0 drm/ttm: Fix cached TTM page allocation. + 1ca14e7 drm/vmwgfx: Remove some leftover debug messages. + 316ab13 drm/vmwgfx: Print warnings in kernel log about bo pinning that fails. 
+ 792778e drm/vmwgfx: Unpause overlay on update. + 259600d drm/vmwgfx: Some modesetting cleanups and fixes. + d451f62 drm/vmwgfx: Don't use SVGA_REG_ENABLE in modesetting code. + bbfad33 drm/vmwgfx: Remove duplicate member from struct vmw_legacy_display_unit. + 22ee861 drm/vmwgfx: Reserve first part of VRAM for framebuffer. + d7e1958 drm/vmwgfx: Support older hardware. + 1ae1ddd drm/vmwgfx: Get connector status from detection function. + 1925d45 drm/vmwgfx: Add kernel throttling support. Bump minor. + 04e9e94 drm/vmwgfx: Make sure to unpin old and pin new framebuffer. + 6a591a9 drm/vmwgfx: Fix single framebuffer detection. + 7e71f8a drm/vmwgfx: Assume larger framebuffer max size. + becd214 drm/nv50: use alternate source of SOR_MODE_CTRL for DP hack + 26099a7 drm/nouveau: fix dual-link displays when plugged into single-link outputs + 2c58077 drm/nv50: obey dcb->duallink_possible + 2348487 drm/nv50: fix duallink_possible calculation for DCB 4.0 cards + 73db4be drm/nouveau: don't execute INIT_GPIO unless we're really running the table + f50c0b9 drm/nv40: allow cold-booting of nv4x chipsets + d13102c drm/nouveau: fix POST detection for certain chipsets + 7fc74f1 drm/nouveau: Add getparam for current PTIMER time. 
+ b334f2b drm/nouveau: allow cursor image and position to survive suspend + +commit 663568ea6a7503a12898c7f1ba8192c8d42a28ac +Author: Linus Torvalds +Date: Tue Jun 1 14:12:27 2010 -0700 + + Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel + + * 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (41 commits) + % git log --oneline --no-merges 08a6685..709d015 + e3a815f drm/i915: add HAS_BSD check to i915_getparam + 9bc3549 drm/i915: Honor sync polarity from VBT panel timing descriptors + a1786bd drm/i915: Unmask interrupt for render engine on Sandybridge + ca76482 drm/i915: Fix PIPE_CONTROL command on Sandybridge + ab34c22 drm/i915: Fix up address spaces in slow_kernel_write() + 99a03df drm/i915: Use non-atomic kmap for slow copy paths + 9b8c4a0 drm/i915: Avoid moving from CPU domain during pwrite + 68f95ba drm/i915: Cleanup after failed initialization of ringbuffers + 654fc60 drm/i915: Reject bind_to_gtt() early if object > aperture + 85cd461 drm/i915: Check error code whilst moving buffer to GTT domain. + 3d1cc47 drm/i915: Remove spurious warning "Failure to install fence" + ac0c6b5 drm/i915: Rebind bo if currently bound with incorrect alignment. + a7faf32 drm/i915: Include pitch in set_base debug statement. + a939406 drm/i915: Only print "nothing to do" debug message as required. + 808b24d drm/i915: Propagate error from unbinding an unfenceable object. + b118c1e drm/i915: Avoid nesting of domain changes when setting display plane + 468f0b4 drm/i915: Hold the spinlock whilst resetting unpin_work along error path + 35aed2e drm/i915: Only print an message if there was an error + e20f9c6 drm/i915: Clean up leftover bits from hws move to ring structure. 
+ 9553426 drm/i915: Add CxSR support on Pineview DDR3 + d8201ab i915: remove unneeded null checks + 90a78e8 i915/intel_sdvo: remove unneeded null check + 467b200 drm/i915: Fix HDMI mode select for Cougarpoint PCH + 778c354 drm/i915: combine all small integers into one single bitfield + a7de64e drm/i915/dp: Add DPCD data to debug output + 9962c92 drm/i915/dp: Only enable enhanced framing if the sink supports it + 9908ff7 drm/i915: Kill dangerous pending-flip debugging + f1befe7 agp/intel: Restrict GTT mapping to valid range on i915 and i945 + 9a7e849 drm/i915: Storage class should be before const qualifier + 7648fa9 drm/i915: add power monitoring support + 7a772c4 drm/i915/gen4: Extra CRT hotplug paranoia + 734b415 drm/i915: Add support for interlaced display. + f953c93 i915: fix lock imbalance on error path... + f41275e drm/i915: Convert more trace events to DEFINE_EVENT + 9517a92 drm/i915: add timeout to FBC disable waits + d1b851f drm/i915: implement BSD ring buffer V2 + 852835f drm/i915: convert some gem structures to per-ring V2 + 8187a2b drm/i915: introduce intel_ring_buffer structure (V2) + d3301d8 drm/i915: Rename dev_priv->ring to dev_priv->render_ring. + 62fdfea drm/i915: Move ringbuffer-related code to intel_ringbuffer.c. 
+ 79a78dd drm/i915: Fail to load driver if KMS request without GEM + +commit 30f0d753b32570886e6b98812d33df30229dcf87 +Author: Linus Torvalds +Date: Fri May 28 16:14:40 2010 -0700 + + Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6 + + * 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (1 commit) + % git log --oneline --no-merges e4f2e5e..24010e4 + cf22f20 drm/radeon: fix the r100/r200 ums block 0 page fix + +commit 5bf8778218d6085190bed41b729f6001e712b057 +Author: Linus Torvalds +Date: Wed May 26 12:30:09 2010 -0700 + + Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6 + + * 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (6 commits) + % git log --oneline --no-merges 91803b4..31f4671 + 2101d6f agp: amd64, fix pci reference leaks + 4a638b4 drm/edid: Allow non-fatal checksum errors in CEA blocks + 921d98b drm/radeon/kms: suppress a build warning (unused variable) + f49d273 drm: Fixes linux-next & linux-2.6 checkstack warnings: + 5797660 nouveau: fix acpi_lid_open undefined + 10b0612 drm/radeon/kms: release AGP bridge at suspend + +commit 019d6c44898a414e7d6ef16fce1950577163cccb +Author: Linus Torvalds +Date: Fri May 21 11:14:52 2010 -0700 + + Merge branch 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6 + + * 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (207 commits) + % git log --oneline --no-merges ac3ee84..59534f7 + b486787 drm/radeon/kms/pm/r600: select the mid clock mode for single head low profile + 5d9b7e2 drm/radeon: fix power supply kconfig interaction. + e865275 drm/radeon/kms: record object that have been list reserved + 365048f drm/radeon: AGP memory is only I/O if the aperture can be mapped by the CPU. 
+ 4573744 drm/radeon/kms: don't default display priority to high on rs4xx + c43ae47 drm/edid: fix typo in 1600x1200@75 mode + 893887ed drm/nouveau: fix i2c-related init table handlers + 04f542c drm/nouveau: support init table i2c device identifier 0x81 + f8b0be1 drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers + 92b9618 drm/nouveau: display error message for any failed init table opcode + 9170a82 drm/nouveau: fix init table handlers to return proper error codes + e9ebb68 drm/nv50: support fractional feedback divider on newer chips + 7e99a9b drm/nv50: fix monitor detection on certain chipsets + 07fee3d drm/nv50: store full dcb i2c entry from vbios + afa3b4c drm/nv50: fix suspend/resume with DP outputs + 17b96cc drm/nv50: output calculated crtc pll when debugging on + 4c389f0 drm/nouveau: dump pll limits entries when debugging is on + 25908b7 drm/nouveau: bios parser fixes for eDP boards + 90af89b drm/nouveau: fix a nouveau_bo dereference after it's been destroyed + de1f46a drm/nv40: remove some completed ctxprog TODOs + f23d4cf drm/nv04: Implement missing nv04 PGRAPH methods in software. + a0e6544 drm/nouveau: Use 0x5f instead of 0x9f as imageblit on original NV10. + 6698998 drm/radeon: fix hdmi offset bug reported by smatch. + 8e36ed0 drm/radeon/kms: hpd cleanup + 2bfcc0f drm/radeon/kms: reset ddc_bus in object header parsing + 6fd0248 amd64-agp: Probe unknown AGP devices the right way + d831692 sis-agp: Remove SIS 760, handled by amd64-agp + 26481fb drm/radeon/pm: fix device_create_file return value checks. 
+ 4bff517 drm/radeon/kms/pm: fix r6xx+ profile setup + ce8a3eb drm/radeon/kms/pm: make pm spam debug only + ce8f537 drm/radeon/kms/pm: rework power management + d731117 drm/radeon/kms/pm: add support for no display power states + ca2af92 drm/radeon/kms: fix lock ordering in ring, ib handling + 01434b4 radeon: Use fences to gate entry to reclocking on drm handoff + 06415c5 fbmem, drm/nouveau: kick firmware framebuffers as soon as possible + 1471ca9 fbdev: allow passing more than one aperture for handoff + 3da1f33 drm: Prefix info printk about registering panic notifier with 'drm' + bc35afd drm/radeon/kms: add query for crtc hw id from crtc id to get info V2 + 61dd98f drm/edid: Fix 1024x768@85Hz + 6ebc22e drivers/gpu/drm: Use kzalloc + 96525a2 drm_edid: There should be 6 Standard Timings + f405a1a drivers/gpu/drm: Use kmemdup + ca117d6 vga: fix kconfig text typos + 0bcad4c drm/edid: remove an unneeded variable + 68b61a7 drm/radeon/kms/combios: match lvds panel info parsing to ddx + 1ff26a3 drm/radeon/kms/atom: fix typo in LVDS panel info parsing + 8bf3aae drm/radeon/kms: fix copy pasto in disable encoders patch + a7c5427 drm/i915: Fix out of tree builds + 007cc8a drm/i915: move fence lru to struct drm_i915_fence_reg + 31770bd drm/i915: don't allow tiling changes on pinned buffers v2 + 149c36a drm/i915: Be extra careful about A/D matching for multifunction SDVO + b108333 drm/i915: Fix DDC bus selection for multifunction SDVO + aa96139 drm/radeon/kms/atom: disable the encoders in encoder_disable + 3d8620c drm/i915: cleanup mode setting before unmapping registers + ee5382a drm/i915: Make fbc control wrapper functions + 1637ef4 drm/i915: Wait for the GPU whilst shrinking, if truly desperate. 
+ 0a31a44 drm/i915: Use spatio-temporal dithering on PCH + 9e51159 drm/ttm: fix, avoid iomapping system memory + a1e9ada drm/radeon/kms: R3XX-R4XX fix GPU reset code + f259493 drm/radeon/kms: HDMI irq support + 58bd086 drm/radeon/kms: rework audio polling timer + 61cf059 agp: use scratch page on memory remove and at GATT creation V4 + 2d2ef82 drm: add initial DRM developer documentation + 10fd883 agp/intel: put back check that we have a driver for the bridge. + d4b74bf Revert "drm/i915: Configure the TV sense state correctly on GM45 to make TV detection reliable" + 6b8b178 drm/radeon/kms: enable use of unmappable VRAM V2 + 0c321c7 drm/ttm: remove io_ field from TTM V6 + 96bf8b8 drm/vmwgfx: add support for new TTM fault callback V5 + f32f02f drm/nouveau/kms: add support for new TTM fault callback V5 + 0a2d50e drm/radeon/kms: add support for new fault callback V7 + 82c5da6 drm/ttm: ttm_fault callback to allow driver to handle bo placement V6 + a8089e8 drm/i915: drop pointer to drm_gem_object + 62b8b21 drm/i915: don't use ->driver_private anymore + c397b90 drm/i915: embed the gem object into drm_i915_gem_object + ac52bc5 drm/i915: introduce i915_gem_alloc_object + fd632aa drm: free core gem object from driver callbacks + 1d39704 drm: extract drm_gem_object_init + e158316 agp/intel-gtt: kill previous_size assignments + 1ca46bd agp/intel-gtt: kill intel_i830_tlbflush + 22dd82a agp/intel: split out gmch/gtt probe, part 1 + 059efc6 agp/intel: kill mutli_gmch_chip + e5a04d5 agp/intel: uncoditionally reconfigure driver on resume + f51b766 agp/intel: split out the GTT support + ff7cdd6 agp/intel: introduce intel-agp.h header file + 6e0032f drm/i915: Don't touch PORT_HOTPLUG_EN in intel_dp_detect() + 77ffb59 drm/i915/pch: Use minimal number of FDI lanes (v2) + 7f8a856 drm/i915: Add the support of memory self-refresh on Ironlake + d429434 drm/i915: Move Pineview CxSR and watermark code into update_wm hook. 
+ a2c459e drm/i915: Only save/restore FBC on the platform that supports FBC + 8a1837c drm/i915: Fix the incorrect argument for SDVO SET_TV_format command + 461ed3c drm/i915: Add support of SDVO on Ibexpeak PCH + cfecde4 drm/i915: Don't enable pipe/plane/VCO early (wait for DPMS on). + ea059a1 drm/i915: do not read uninitialized ->dev_private + a1f4b7f Revert "drm/i915: Use a dmi quirk to skip a broken SDVO TV output." + 14571b4 drm/i915: implement multifunction SDVO device support + 409608b drm/i915: remove unused intel_pipe_get_connector() + 1f254ec drm/i915: remove connector object in old output structure + 0c41ee2 drm/i915: convert TV driver to new encoder/connector structure + d2a82a6 drm/i915: convert SDVO driver to new encoder/connector structure + 599be16 drm/i915: convert DVO driver to new encoder/connector structure + 55f78c4 drm/i915: convert DP/eDP driver to new encoder/connector structure + 674e2d0 drm/i915: convert HDMI driver to new encoder/connector structure + bb8a356 drm/i915: convert LVDS driver to new encoder/connector structure + 454c1ca drm/i915: convert VGA driver to new encoder/connector structure + 9c9e792 drm/i915: Set sync polarity correctly on DisplayPort + ab00a9e drm/i915: Un-magic a DPCD register write + e3421a1 drm/i915: enable DP/eDP for Sandybridge/Cougarpoint + 0f22906 drm/i915: enable HDMI on Cougarpoint + b3b095b drm/i915: enable LVDS on Cougarpoint + a4a6b90 drm/i915: Fix CRT force detect on Cougarpoint + 8db9d77 drm/i915: Support for Cougarpoint PCH display pipeline + 3bad078 drm/i915: Probe for PCH chipset type + 7da9f6c drm/i915: Sandybridge has no integrated TV + edcb49c drm/i915: Fix legacy BLC event for pipe A + d275f66 drm/i915: Clear the LVDS pipe B select bit when moving the LVDS to pipe A. + 0f3ee80 drm/i915: Allow LVDS on pipe A on gen4+ + 6443170 drm/i915: Remove dead KMS encoder save/restore code. 
+ 522032d drm/edid: When checking duplicate standard modes, walked the probed list + 335af9a drm/i915: change intel_ddc_get_modes() function parameters + c1c4397 drm/i915: passing drm connector param for load detection + f1c79df drm/i915: Add new helper to return current attached encoder for connector + 5daa55e drm/i915: Add new 'intel_connector' structure + c5e4df3 drm/i915: more conversion from connector_list walk to encoder_list + 5bf4c9c drm/i915: use encoder_list for hotplug callback + 903cf20 drm/i915: Convert some trace events to DEFINE_TRACE + fb8b5a3 drm/i915: Configure the TV sense state correctly on GM45 to make TV detection reliable + a743374 drm/radeon: fix cypress firmware typo. + 0ca2ab5 drm/radeon/kms/evergreen: add hpd support + 45f9a39 drm/radeon/kms/evergreen: implement irq support + fe251e2 drm/radeon/kms/evergreen: setup and enable the CP + 32fcdbf drm/radeon/kms/evergreen: implement gfx init + 747943e drm/radeon/kms/evergreen: add soft reset function + 0fcdb61 drm/radeon/kms/evergreen: add gart support + 49f6598 drm/radeon/kms: add support for evergreen power tables + 08c5c51 drm/radeon/kms: update atombios.h power tables for evergreen + c385e50c drm/edid: Fix sync polarity for secondary GTF curve + 2125b8a drm/ttm: using kmalloc/kfree requires including slab.h + 9d87fa2 drm/ttm: split no_wait argument in 2 GPU or reserve wait + b1f2019 drm/fb: remove drm_fb_helper_setcolreg + 4cdc840 drm/ttm: include linux/seq_file.h for seq_printf + 4abe352 drm/kms/fb: use slow work mechanism for normal hotplug also. + 5c4426a drm/kms/fb: add polling support for when nothing is connected. + 19b4b44 drm/kms/fb: provide a 1024x768 fbcon if no outputs found. + 0b4c0f3 drm/kms/fb: separate fbdev connector list from core drm connectors + 8be48d9 drm/kms/fb: move to using fb helper crtc grouping instead of core crtc list + 3865167 drm/fb: fix fbdev object model + cleanup properly. + c96af79 drm/ttm: Add sysfs interface to control pool allocator. 
+ 975efdb drm/ttm: Use set_pages_array_wc instead of set_memory_wc. + 4f64625 arch/x86: Add array variants for setting memory to wc caching. + bf62acd drm/nouveau: Add ttm page pool debugfs file. + 8d7cddc drm/radeon/kms: Add ttm page pool debugfs file. + 0745866 drm/ttm: Add debugfs output entry to pool allocator. + 1403b1a drm/ttm: add pool wc/uc page allocator V3 + 90aca4d drm/radeon/kms: simplify & improve GPU reset V2 + a2d07b7 drm/radeon/kms: rename gpu_reset to asic_reset + 225758d drm/radeon/kms: fence cleanup + more reliable GPU lockup detection V4 + 171fdd8 drm/modes: Fix interlaced mode names + 7a37435 drm/edid: Add secondary GTF curve support + 7ca6adb drm/edid: Strengthen the algorithm for standard mode codes + a0910c8 drm/edid: Fix the HDTV hack. + b17e52e drm/edid: Extend range-based mode addition for EDID 1.4 + d1ff640 drm/edid: Add test for monitor reduced blanking support. + a327f6b drm/edid: Fix preferred mode parse for EDID 1.4 + 59d8aff drm/edid: Remove some silly comments + 7466f4c drm/edid: Remove arbitrary EDID extension limit + 2255be1 drm/edid: Add modes for Established Timings III section + c867df7 drm/edid: Reshuffle mode list construction to closer match the spec + 2b470ab drm/edid: Remove a redundant check + fbcc06b drm/edid: Remove some misleading comments + 61e57a8 drm/edid: Fix secondary block fetch. 
+ + Documentation/DocBook/Makefile | 2 +- + Documentation/DocBook/drm.tmpl | 839 ++++++++++ + arch/x86/include/asm/cacheflush.h | 2 + + arch/x86/mm/pageattr.c | 53 +- + drivers/char/agp/agp.h | 80 - + drivers/char/agp/ali-agp.c | 1 + + drivers/char/agp/amd-k7-agp.c | 9 + + drivers/char/agp/amd64-agp.c | 56 +- + drivers/char/agp/ati-agp.c | 8 + + drivers/char/agp/efficeon-agp.c | 1 + + drivers/char/agp/intel-agp.c | 1883 ++--------------------- + drivers/char/agp/intel-agp.h | 239 +++ + drivers/char/agp/intel-gtt.c | 1548 +++++++++++++++++++ + drivers/char/agp/nvidia-agp.c | 1 + + drivers/char/agp/sis-agp.c | 9 +- + drivers/char/agp/uninorth-agp.c | 16 +- + drivers/char/agp/via-agp.c | 2 + + drivers/gpu/drm/Kconfig | 4 + + drivers/gpu/drm/drm_auth.c | 3 +- + drivers/gpu/drm/drm_crtc.c | 13 +- + drivers/gpu/drm/drm_crtc_helper.c | 506 ++----- + drivers/gpu/drm/drm_dma.c | 4 +- + drivers/gpu/drm/drm_edid.c | 807 +++++++--- + drivers/gpu/drm/drm_fb_helper.c | 910 ++++++++---- + drivers/gpu/drm/drm_fops.c | 3 +- + drivers/gpu/drm/drm_gem.c | 49 +- + drivers/gpu/drm/drm_modes.c | 105 +- + drivers/gpu/drm/drm_sysfs.c | 2 +- + drivers/gpu/drm/i915/Makefile | 3 + + drivers/gpu/drm/i915/dvo.h | 10 - + drivers/gpu/drm/i915/dvo_ch7017.c | 46 +- + drivers/gpu/drm/i915/dvo_ch7xxx.c | 44 +- + drivers/gpu/drm/i915/dvo_ivch.c | 21 - + drivers/gpu/drm/i915/dvo_sil164.c | 38 - + drivers/gpu/drm/i915/dvo_tfp410.c | 32 - + drivers/gpu/drm/i915/i915_debugfs.c | 110 +- + drivers/gpu/drm/i915/i915_dma.c | 745 ++++++++-- + drivers/gpu/drm/i915/i915_drv.c | 99 +- + drivers/gpu/drm/i915/i915_drv.h | 246 ++-- + drivers/gpu/drm/i915/i915_gem.c | 1045 ++++++-------- + drivers/gpu/drm/i915/i915_gem_debug.c | 2 +- + drivers/gpu/drm/i915/i915_gem_tiling.c | 5 + + drivers/gpu/drm/i915/i915_irq.c | 205 ++-- + drivers/gpu/drm/i915/i915_reg.h | 225 +++- + drivers/gpu/drm/i915/i915_suspend.c | 41 +- + drivers/gpu/drm/i915/i915_trace.h | 112 +- + drivers/gpu/drm/i915/intel_bios.c | 11 + + 
drivers/gpu/drm/i915/intel_crt.c | 116 +- + drivers/gpu/drm/i915/intel_display.c | 1350 ++++++++++++----- + drivers/gpu/drm/i915/intel_dp.c | 263 ++-- + drivers/gpu/drm/i915/intel_drv.h | 31 +- + drivers/gpu/drm/i915/intel_dvo.c | 103 +- + drivers/gpu/drm/i915/intel_fb.c | 223 ++-- + drivers/gpu/drm/i915/intel_hdmi.c | 76 +- + drivers/gpu/drm/i915/intel_lvds.c | 111 +- + drivers/gpu/drm/i915/intel_modes.c | 21 +- + drivers/gpu/drm/i915/intel_overlay.c | 60 +- + drivers/gpu/drm/i915/intel_ringbuffer.c | 849 ++++++++++ + drivers/gpu/drm/i915/intel_ringbuffer.h | 124 ++ + drivers/gpu/drm/i915/intel_sdvo.c | 1009 ++++++------- + drivers/gpu/drm/i915/intel_tv.c | 185 +-- + drivers/gpu/drm/nouveau/Makefile | 3 +- + drivers/gpu/drm/nouveau/nouveau_acpi.c | 71 +- + drivers/gpu/drm/nouveau/nouveau_bios.c | 594 +++++--- + drivers/gpu/drm/nouveau/nouveau_bios.h | 1 + + drivers/gpu/drm/nouveau/nouveau_bo.c | 116 +- + drivers/gpu/drm/nouveau/nouveau_connector.c | 49 +- + drivers/gpu/drm/nouveau/nouveau_crtc.h | 2 + + drivers/gpu/drm/nouveau/nouveau_debugfs.c | 3 + + drivers/gpu/drm/nouveau/nouveau_display.c | 42 +- + drivers/gpu/drm/nouveau/nouveau_drv.c | 48 +- + drivers/gpu/drm/nouveau/nouveau_drv.h | 15 + + drivers/gpu/drm/nouveau/nouveau_encoder.h | 2 + + drivers/gpu/drm/nouveau/nouveau_fb.h | 6 +- + drivers/gpu/drm/nouveau/nouveau_fbcon.c | 265 ++-- + drivers/gpu/drm/nouveau/nouveau_fbcon.h | 19 +- + drivers/gpu/drm/nouveau/nouveau_gem.c | 5 +- + drivers/gpu/drm/nouveau/nouveau_grctx.c | 6 +- + drivers/gpu/drm/nouveau/nouveau_i2c.c | 21 +- + drivers/gpu/drm/nouveau/nouveau_irq.c | 10 +- + drivers/gpu/drm/nouveau/nouveau_mem.c | 3 +- + drivers/gpu/drm/nouveau/nouveau_reg.h | 1 + + drivers/gpu/drm/nouveau/nouveau_state.c | 83 +- + drivers/gpu/drm/nouveau/nv04_cursor.c | 1 + + drivers/gpu/drm/nouveau/nv04_fbcon.c | 18 +- + drivers/gpu/drm/nouveau/nv04_graph.c | 566 +++++++- + drivers/gpu/drm/nouveau/nv40_graph.c | 8 +- + drivers/gpu/drm/nouveau/nv40_grctx.c | 5 - + 
drivers/gpu/drm/nouveau/nv50_calc.c | 87 ++ + drivers/gpu/drm/nouveau/nv50_crtc.c | 46 +- + drivers/gpu/drm/nouveau/nv50_cursor.c | 1 + + drivers/gpu/drm/nouveau/nv50_display.c | 36 + + drivers/gpu/drm/nouveau/nv50_fb.c | 10 +- + drivers/gpu/drm/nouveau/nv50_fbcon.c | 16 +- + drivers/gpu/drm/nouveau/nv50_gpio.c | 2 +- + drivers/gpu/drm/nouveau/nv50_sor.c | 18 +- + drivers/gpu/drm/radeon/Makefile | 7 +- + drivers/gpu/drm/radeon/atombios.h | 76 +- + drivers/gpu/drm/radeon/atombios_crtc.c | 23 +- + drivers/gpu/drm/radeon/atombios_dp.c | 2 +- + drivers/gpu/drm/radeon/evergreen.c | 1562 ++++++++++++++++++- + drivers/gpu/drm/radeon/evergreen_cs.c | 1356 ++++++++++++++++ + drivers/gpu/drm/radeon/evergreen_reg.h | 7 + + drivers/gpu/drm/radeon/evergreend.h | 1020 ++++++++++++ + drivers/gpu/drm/radeon/r100.c | 739 +++++++--- + drivers/gpu/drm/radeon/r100d.h | 164 ++ + drivers/gpu/drm/radeon/r300.c | 151 +- + drivers/gpu/drm/radeon/r300d.h | 47 +- + drivers/gpu/drm/radeon/r420.c | 46 +- + drivers/gpu/drm/radeon/r500_reg.h | 3 + + drivers/gpu/drm/radeon/r520.c | 7 +- + drivers/gpu/drm/radeon/r600.c | 693 ++++++++- + drivers/gpu/drm/radeon/r600_audio.c | 58 +- + drivers/gpu/drm/radeon/r600_blit_kms.c | 3 + + drivers/gpu/drm/radeon/r600_hdmi.c | 65 +- + drivers/gpu/drm/radeon/r600_reg.h | 57 +- + drivers/gpu/drm/radeon/radeon.h | 265 +++- + drivers/gpu/drm/radeon/radeon_agp.c | 5 + + drivers/gpu/drm/radeon/radeon_asic.c | 144 ++- + drivers/gpu/drm/radeon/radeon_asic.h | 45 +- + drivers/gpu/drm/radeon/radeon_atombios.c | 321 +++- + drivers/gpu/drm/radeon/radeon_bios.c | 3 +- + drivers/gpu/drm/radeon/radeon_combios.c | 71 +- + drivers/gpu/drm/radeon/radeon_connectors.c | 63 +- + drivers/gpu/drm/radeon/radeon_cs.c | 4 - + drivers/gpu/drm/radeon/radeon_device.c | 72 +- + drivers/gpu/drm/radeon/radeon_display.c | 135 +- + drivers/gpu/drm/radeon/radeon_drv.c | 12 +- + drivers/gpu/drm/radeon/radeon_encoders.c | 44 +- + drivers/gpu/drm/radeon/radeon_fb.c | 364 +++-- + 
drivers/gpu/drm/radeon/radeon_fence.c | 107 +- + drivers/gpu/drm/radeon/radeon_fixed.h | 67 - + drivers/gpu/drm/radeon/radeon_gart.c | 2 +- + drivers/gpu/drm/radeon/radeon_gem.c | 6 +- + drivers/gpu/drm/radeon/radeon_irq_kms.c | 5 +- + drivers/gpu/drm/radeon/radeon_kms.c | 25 + + drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 14 +- + drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 26 +- + drivers/gpu/drm/radeon/radeon_mode.h | 49 +- + drivers/gpu/drm/radeon/radeon_object.c | 44 +- + drivers/gpu/drm/radeon/radeon_object.h | 2 +- + drivers/gpu/drm/radeon/radeon_pm.c | 834 +++++++---- + drivers/gpu/drm/radeon/radeon_reg.h | 4 +- + drivers/gpu/drm/radeon/radeon_ring.c | 66 +- + drivers/gpu/drm/radeon/radeon_state.c | 5 +- + drivers/gpu/drm/radeon/radeon_ttm.c | 122 +- + drivers/gpu/drm/radeon/reg_srcs/evergreen | 611 ++++++++ + drivers/gpu/drm/radeon/rs400.c | 9 +- + drivers/gpu/drm/radeon/rs600.c | 232 +++- + drivers/gpu/drm/radeon/rs600d.h | 80 + + drivers/gpu/drm/radeon/rs690.c | 289 ++-- + drivers/gpu/drm/radeon/rv515.c | 287 ++--- + drivers/gpu/drm/radeon/rv515d.h | 46 + + drivers/gpu/drm/radeon/rv770.c | 39 +- + drivers/gpu/drm/savage/savage_bci.c | 3 +- + drivers/gpu/drm/ttm/Makefile | 2 +- + drivers/gpu/drm/ttm/ttm_bo.c | 98 +- + drivers/gpu/drm/ttm/ttm_bo_util.c | 122 +- + drivers/gpu/drm/ttm/ttm_bo_vm.c | 41 +- + drivers/gpu/drm/ttm/ttm_memory.c | 7 +- + drivers/gpu/drm/ttm/ttm_page_alloc.c | 855 ++++++++++ + drivers/gpu/drm/ttm/ttm_tt.c | 44 +- + drivers/gpu/drm/vmwgfx/Makefile | 2 +- + drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 50 +- + drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 24 +- + drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 49 +- + drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 14 +- + drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 101 +- + drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 173 +++ + drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 23 +- + drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 17 +- + drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 209 ++- + drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4 +- + 
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 189 ++- + drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 4 +- + drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 8 +- + drivers/gpu/vga/Kconfig | 6 +- + drivers/gpu/vga/vgaarb.c | 61 +- + drivers/staging/Kconfig | 2 - + drivers/video/efifb.c | 11 +- + drivers/video/fbmem.c | 74 +- + drivers/video/fbsysfs.c | 1 + + drivers/video/offb.c | 28 +- + drivers/video/vesafb.c | 11 +- + drivers/video/vga16fb.c | 26 +- + include/drm/drmP.h | 3 + + include/drm/drm_crtc.h | 43 +- + include/drm/drm_crtc_helper.h | 12 +- + include/drm/drm_edid.h | 5 +- + include/drm/drm_fb_helper.h | 67 +- + include/drm/drm_fixed.h | 67 + + include/drm/i915_drm.h | 5 +- + include/drm/nouveau_drm.h | 1 + + include/drm/radeon_drm.h | 2 + + include/drm/ttm/ttm_bo_api.h | 46 +- + include/drm/ttm/ttm_bo_driver.h | 57 +- + include/drm/ttm/ttm_page_alloc.h | 74 + + include/drm/vmwgfx_drm.h | 26 + + include/linux/fb.h | 19 +- + include/linux/vgaarb.h | 21 + + 200 files changed, 21571 insertions(+), 8636 deletions(-) + +diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile +index 325cfd1..c7e5dc7 100644 +--- a/Documentation/DocBook/Makefile ++++ b/Documentation/DocBook/Makefile +@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \ + genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ + mac80211.xml debugobjects.xml sh.xml regulator.xml \ + alsa-driver-api.xml writing-an-alsa-driver.xml \ +- tracepoint.xml utrace.xml media.xml ++ tracepoint.xml utrace.xml media.xml drm.xml + + ### + # The build process is as follows (targets): +diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl +new file mode 100644 +index 0000000..910c923 +--- /dev/null ++++ b/Documentation/DocBook/drm.tmpl +@@ -0,0 +1,839 @@ ++ ++ ++ ++ ++ ++ Linux DRM Developer's Guide ++ ++ ++ 2008-2009 ++ ++ Intel Corporation (Jesse Barnes <jesse.barnes@intel.com>) ++ ++ ++ ++ ++ ++ The contents of this file may be used under the terms of the GNU 
++ General Public License version 2 (the "GPL") as distributed in ++ the kernel source COPYING file. ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ Introduction ++ ++ The Linux DRM layer contains code intended to support the needs ++ of complex graphics devices, usually containing programmable ++ pipelines well suited to 3D graphics acceleration. Graphics ++ drivers in the kernel can make use of DRM functions to make ++ tasks like memory management, interrupt handling and DMA easier, ++ and provide a uniform interface to applications. ++ ++ ++ A note on versions: this guide covers features found in the DRM ++ tree, including the TTM memory manager, output configuration and ++ mode setting, and the new vblank internals, in addition to all ++ the regular features found in current kernels. ++ ++ ++ [Insert diagram of typical DRM stack here] ++ ++ ++ ++ ++ ++ ++ DRM Internals ++ ++ This chapter documents DRM internals relevant to driver authors ++ and developers working to add support for the latest features to ++ existing drivers. ++ ++ ++ First, we'll go over some typical driver initialization ++ requirements, like setting up command buffers, creating an ++ initial output configuration, and initializing core services. ++ Subsequent sections will cover core internals in more detail, ++ providing implementation notes and examples. ++ ++ ++ The DRM layer provides several services to graphics drivers, ++ many of them driven by the application interfaces it provides ++ through libdrm, the library that wraps most of the DRM ioctls. ++ These include vblank event handling, memory ++ management, output management, framebuffer management, command ++ submission & fencing, suspend/resume support, and DMA ++ services. ++ ++ ++ The core of every DRM driver is struct drm_device. Drivers ++ will typically statically initialize a drm_device structure, ++ then pass it to drm_init() at load time. 
++ ++ ++ ++ ++ ++ Driver initialization ++ ++ Before calling the DRM initialization routines, the driver must ++ first create and fill out a struct drm_device structure. ++ ++ ++ static struct drm_driver driver = { ++ /* don't use mtrr's here, the Xserver or user space app should ++ * deal with them for intel hardware. ++ */ ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | ++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, ++ .load = i915_driver_load, ++ .unload = i915_driver_unload, ++ .firstopen = i915_driver_firstopen, ++ .lastclose = i915_driver_lastclose, ++ .preclose = i915_driver_preclose, ++ .save = i915_save, ++ .restore = i915_restore, ++ .device_is_agp = i915_driver_device_is_agp, ++ .get_vblank_counter = i915_get_vblank_counter, ++ .enable_vblank = i915_enable_vblank, ++ .disable_vblank = i915_disable_vblank, ++ .irq_preinstall = i915_driver_irq_preinstall, ++ .irq_postinstall = i915_driver_irq_postinstall, ++ .irq_uninstall = i915_driver_irq_uninstall, ++ .irq_handler = i915_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .fb_probe = intelfb_probe, ++ .fb_remove = intelfb_remove, ++ .fb_resize = intelfb_resize, ++ .master_create = i915_master_create, ++ .master_destroy = i915_master_destroy, ++#if defined(CONFIG_DEBUG_FS) ++ .debugfs_init = i915_debugfs_init, ++ .debugfs_cleanup = i915_debugfs_cleanup, ++#endif ++ .gem_init_object = i915_gem_init_object, ++ .gem_free_object = i915_gem_free_object, ++ .gem_vm_ops = &i915_gem_vm_ops, ++ .ioctls = i915_ioctls, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = i915_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = 
__devexit_p(drm_cleanup_pci), ++ }, ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++ }; ++ ++ ++ In the example above, taken from the i915 DRM driver, the driver ++ sets several flags indicating what core features it supports. ++ We'll go over the individual callbacks in later sections. Since ++ flags indicate which features your driver supports to the DRM ++ core, you need to set most of them prior to calling drm_init(). Some, ++ like DRIVER_MODESET can be set later based on user supplied parameters, ++ but that's the exception rather than the rule. ++ ++ ++ Driver flags ++ ++ DRIVER_USE_AGP ++ ++ Driver uses AGP interface ++ ++ ++ ++ DRIVER_REQUIRE_AGP ++ ++ Driver needs AGP interface to function. ++ ++ ++ ++ DRIVER_USE_MTRR ++ ++ ++ Driver uses MTRR interface for mapping memory. Deprecated. ++ ++ ++ ++ ++ DRIVER_PCI_DMA ++ ++ Driver is capable of PCI DMA. Deprecated. ++ ++ ++ ++ DRIVER_SG ++ ++ Driver can perform scatter/gather DMA. Deprecated. ++ ++ ++ ++ DRIVER_HAVE_DMA ++ Driver supports DMA. Deprecated. ++ ++ ++ DRIVER_HAVE_IRQDRIVER_IRQ_SHARED ++ ++ ++ DRIVER_HAVE_IRQ indicates whether the driver has a IRQ ++ handler, DRIVER_IRQ_SHARED indicates whether the device & ++ handler support shared IRQs (note that this is required of ++ PCI drivers). ++ ++ ++ ++ ++ DRIVER_DMA_QUEUE ++ ++ ++ If the driver queues DMA requests and completes them ++ asynchronously, this flag should be set. Deprecated. ++ ++ ++ ++ ++ DRIVER_FB_DMA ++ ++ ++ Driver supports DMA to/from the framebuffer. Deprecated. ++ ++ ++ ++ ++ DRIVER_MODESET ++ ++ ++ Driver supports mode setting interfaces. ++ ++ ++ ++ ++ ++ In this specific case, the driver requires AGP and supports ++ IRQs. DMA, as we'll see, is handled by device specific ioctls ++ in this case. 
It also supports the kernel mode setting APIs, though ++ unlike in the actual i915 driver source, this example unconditionally ++ exports KMS capability. ++ ++ ++ ++ ++ ++ ++ Driver load ++ ++ In the previous section, we saw what a typical drm_driver ++ structure might look like. One of the more important fields in ++ the structure is the hook for the load function. ++ ++ ++ static struct drm_driver driver = { ++ ... ++ .load = i915_driver_load, ++ ... ++ }; ++ ++ ++ The load function has many responsibilities: allocating a driver ++ private structure, specifying supported performance counters, ++ configuring the device (e.g. mapping registers & command ++ buffers), initializing the memory manager, and setting up the ++ initial output configuration. ++ ++ ++ Note that the tasks performed at driver load time must not ++ conflict with DRM client requirements. For instance, if user ++ level mode setting drivers are in use, it would be problematic ++ to perform output discovery & configuration at load time. ++ Likewise, if pre-memory management aware user level drivers are ++ in use, memory management and command buffer setup may need to ++ be omitted. These requirements are driver specific, and care ++ needs to be taken to keep both old and new applications and ++ libraries working. The i915 driver supports the "modeset" ++ module parameter to control whether advanced features are ++ enabled at load time or in legacy fashion. If compatibility is ++ a concern (e.g. with drivers converted over to the new interfaces ++ from the old ones), care must be taken to prevent incompatible ++ device initialization and control with the currently active ++ userspace drivers. ++ ++ ++ ++ Driver private & performance counters ++ ++ The driver private hangs off the main drm_device structure and ++ can be used for tracking various device specific bits of ++ information, like register offsets, command buffer status, ++ register state for suspend/resume, etc. 
At load time, a ++ driver can simply allocate one and set drm_device.dev_priv ++ appropriately; at unload the driver can free it and set ++ drm_device.dev_priv to NULL. ++ ++ ++ The DRM supports several counters which can be used for rough ++ performance characterization. Note that the DRM stat counter ++ system is not often used by applications, and supporting ++ additional counters is completely optional. ++ ++ ++ These interfaces are deprecated and should not be used. If performance ++ monitoring is desired, the developer should investigate and ++ potentially enhance the kernel perf and tracing infrastructure to export ++ GPU related performance information to performance monitoring ++ tools and applications. ++ ++ ++ ++ ++ Configuring the device ++ ++ Obviously, device configuration will be device specific. ++ However, there are several common operations: finding a ++ device's PCI resources, mapping them, and potentially setting ++ up an IRQ handler. ++ ++ ++ Finding & mapping resources is fairly straightforward. The ++ DRM wrapper functions, drm_get_resource_start() and ++ drm_get_resource_len() can be used to find BARs on the given ++ drm_device struct. Once those values have been retrieved, the ++ driver load function can call drm_addmap() to create a new ++ mapping for the BAR in question. Note you'll probably want a ++ drm_local_map_t in your driver private structure to track any ++ mappings you create. ++ ++ ++ ++ ++ if compatibility with other operating systems isn't a concern ++ (DRM drivers can run under various BSD variants and OpenSolaris), ++ native Linux calls can be used for the above, e.g. pci_resource_* ++ and iomap*/iounmap. See the Linux device driver book for more ++ info. 
++ ++ ++ Once you have a register map, you can use the DRM_READn() and ++ DRM_WRITEn() macros to access the registers on your device, or ++ use driver specific versions to offset into your MMIO space ++ relative to a driver specific base pointer (see I915_READ for ++ example). ++ ++ ++ If your device supports interrupt generation, you may want to ++ setup an interrupt handler at driver load time as well. This ++ is done using the drm_irq_install() function. If your device ++ supports vertical blank interrupts, it should call ++ drm_vblank_init() to initialize the core vblank handling code before ++ enabling interrupts on your device. This ensures the vblank related ++ structures are allocated and allows the core to handle vblank events. ++ ++ ++ ++ Once your interrupt handler is registered (it'll use your ++ drm_driver.irq_handler as the actual interrupt handling ++ function), you can safely enable interrupts on your device, ++ assuming any other state your interrupt handler uses is also ++ initialized. ++ ++ ++ Another task that may be necessary during configuration is ++ mapping the video BIOS. On many devices, the VBIOS describes ++ device configuration, LCD panel timings (if any), and contains ++ flags indicating device state. Mapping the BIOS can be done ++ using the pci_map_rom() call, a convenience function that ++ takes care of mapping the actual ROM, whether it has been ++ shadowed into memory (typically at address 0xc0000) or exists ++ on the PCI device in the ROM BAR. Note that once you've ++ mapped the ROM and extracted any necessary information, be ++ sure to unmap it; on many devices the ROM address decoder is ++ shared with other BARs, so leaving it mapped can cause ++ undesired behavior like hangs or memory corruption. 
++ ++ ++ ++ ++ ++ Memory manager initialization ++ ++ In order to allocate command buffers, cursor memory, scanout ++ buffers, etc., as well as support the latest features provided ++ by packages like Mesa and the X.Org X server, your driver ++ should support a memory manager. ++ ++ ++ If your driver supports memory management (it should!), you'll ++ need to set that up at load time as well. How you initialize ++ it depends on which memory manager you're using, TTM or GEM. ++ ++ ++ TTM initialization ++ ++ TTM (for Translation Table Manager) manages video memory and ++ aperture space for graphics devices. TTM supports both UMA devices ++ and devices with dedicated video RAM (VRAM), i.e. most discrete ++ graphics devices. If your device has dedicated RAM, supporting ++ TTM is desirable. TTM also integrates tightly with your ++ driver specific buffer execution function. See the radeon ++ driver for examples. ++ ++ ++ The core TTM structure is the ttm_bo_driver struct. It contains ++ several fields with function pointers for initializing the TTM, ++ allocating and freeing memory, waiting for command completion ++ and fence synchronization, and memory migration. See the ++ radeon_ttm.c file for an example of usage. ++ ++ ++ The ttm_global_reference structure is made up of several fields: ++ ++ ++ struct ttm_global_reference { ++ enum ttm_global_types global_type; ++ size_t size; ++ void *object; ++ int (*init) (struct ttm_global_reference *); ++ void (*release) (struct ttm_global_reference *); ++ }; ++ ++ ++ There should be one global reference structure for your memory ++ manager as a whole, and there will be others for each object ++ created by the memory manager at runtime. Your global TTM should ++ have a type of TTM_GLOBAL_TTM_MEM. 
The size field for the global ++ object should be sizeof(struct ttm_mem_global), and the init and ++ release hooks should point at your driver specific init and ++ release routines, which will probably eventually call ++ ttm_mem_global_init and ttm_mem_global_release respectively. ++ ++ ++ Once your global TTM accounting structure is set up and initialized ++ (done by calling ttm_global_item_ref on the global object you ++ just created), you'll need to create a buffer object TTM to ++ provide a pool for buffer object allocation by clients and the ++ kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO, ++ and its size should be sizeof(struct ttm_bo_global). Again, ++ driver specific init and release functions can be provided, ++ likely eventually calling ttm_bo_global_init and ++ ttm_bo_global_release, respectively. Also like the previous ++ object, ttm_global_item_ref is used to create an initial reference ++ count for the TTM, which will call your initialization function. ++ ++ ++ ++ GEM initialization ++ ++ GEM is an alternative to TTM, designed specifically for UMA ++ devices. It has simpler initialization and execution requirements ++ than TTM, but has no VRAM management capability. Core GEM ++ initialization is comprised of a basic drm_mm_init call to create ++ a GTT DRM MM object, which provides an address space pool for ++ object allocation. In a KMS configuration, the driver will ++ need to allocate and initialize a command ring buffer following ++ basic GEM initialization. Most UMA devices have a so-called ++ "stolen" memory region, which provides space for the initial ++ framebuffer and large, contiguous memory regions required by the ++ device. This space is not typically managed by GEM, and must ++ be initialized separately into its own DRM MM object. ++ ++ ++ Initialization will be driver specific, and will depend on ++ the architecture of the device. 
In the case of Intel ++ integrated graphics chips like 965GM, GEM initialization can ++ be done by calling the internal GEM init function, ++ i915_gem_do_init(). Since the 965GM is a UMA device ++ (i.e. it doesn't have dedicated VRAM), GEM will manage ++ making regular RAM available for GPU operations. Memory set ++ aside by the BIOS (called "stolen" memory by the i915 ++ driver) will be managed by the DRM memrange allocator; the ++ rest of the aperture will be managed by GEM. ++ ++ /* Basic memrange allocator for stolen space (aka vram) */ ++ drm_memrange_init(&dev_priv->vram, 0, prealloc_size); ++ /* Let GEM Manage from end of prealloc space to end of aperture */ ++ i915_gem_do_init(dev, prealloc_size, agp_size); ++ ++ ++ ++ ++ Once the memory manager has been set up, we can allocate the ++ command buffer. In the i915 case, this is also done with a ++ GEM function, i915_gem_init_ringbuffer(). ++ ++ ++ ++ ++ ++ Output configuration ++ ++ The final initialization task is output configuration. This involves ++ finding and initializing the CRTCs, encoders and connectors ++ for your device, creating an initial configuration and ++ registering a framebuffer console driver. ++ ++ ++ Output discovery and initialization ++ ++ Several core functions exist to create CRTCs, encoders and ++ connectors, namely drm_crtc_init(), drm_connector_init() and ++ drm_encoder_init(), along with several "helper" functions to ++ perform common tasks. ++ ++ ++ Connectors should be registered with sysfs once they've been ++ detected and initialized, using the ++ drm_sysfs_connector_add() function. Likewise, when they're ++ removed from the system, they should be destroyed with ++ drm_sysfs_connector_remove(). 
++ ++ ++base; ++ drm_connector_init(dev, &intel_output->base, ++ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); ++ ++ drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, ++ DRM_MODE_ENCODER_DAC); ++ ++ drm_mode_connector_attach_encoder(&intel_output->base, ++ &intel_output->enc); ++ ++ /* Set up the DDC bus. */ ++ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); ++ if (!intel_output->ddc_bus) { ++ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " ++ "failed.\n"); ++ return; ++ } ++ ++ intel_output->type = INTEL_OUTPUT_ANALOG; ++ connector->interlace_allowed = 0; ++ connector->doublescan_allowed = 0; ++ ++ drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); ++ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); ++ ++ drm_sysfs_connector_add(connector); ++} ++]]> ++ ++ ++ In the example above (again, taken from the i915 driver), a ++ CRT connector and encoder combination is created. A device ++ specific i2c bus is also created, for fetching EDID data and ++ performing monitor detection. Once the process is complete, ++ the new connector is registered with sysfs, to make its ++ properties available to applications. ++ ++ ++ Helper functions and core functions ++ ++ Since many PC-class graphics devices have similar display output ++ designs, the DRM provides a set of helper functions to make ++ output management easier. The core helper routines handle ++ encoder re-routing and disabling of unused functions following ++ mode set. Using the helpers is optional, but recommended for ++ devices with PC-style architectures (i.e. a set of display planes ++ for feeding pixels to encoders which are in turn routed to ++ connectors). Devices with more complex requirements needing ++ finer grained management can opt to use the core callbacks ++ directly. ++ ++ ++ [Insert typical diagram here.] [Insert OMAP style config here.] 
++ ++ ++ ++ For each encoder, CRTC and connector, several functions must ++ be provided, depending on the object type. Encoder objects ++ need to provide a DPMS (basically on/off) function, mode fixup ++ (for converting requested modes into native hardware timings), ++ and prepare, set and commit functions for use by the core DRM ++ helper functions. Connector helpers need to provide mode fetch and ++ validity functions as well as an encoder matching function for ++ returning an ideal encoder for a given connector. The core ++ connector functions include a DPMS callback, (deprecated) ++ save/restore routines, detection, mode probing, property handling, ++ and cleanup functions. ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ VBlank event handling ++ ++ The DRM core exposes two vertical blank related ioctls: ++ DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL. ++ ++ ++ ++ DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure ++ as its argument, and is used to block or request a signal when a ++ specified vblank event occurs. ++ ++ ++ DRM_IOCTL_MODESET_CTL should be called by application level ++ drivers before and after mode setting, since on many devices the ++ vertical blank counter will be reset at that time. Internally, ++ the DRM snapshots the last vblank count when the ioctl is called ++ with the _DRM_PRE_MODESET command so that the counter won't go ++ backwards (which is dealt with when _DRM_POST_MODESET is used). ++ ++ ++ To support the functions above, the DRM core provides several ++ helper functions for tracking vertical blank counters, and ++ requires drivers to provide several callbacks: ++ get_vblank_counter(), enable_vblank() and disable_vblank(). The ++ core uses get_vblank_counter() to keep the counter accurate ++ across interrupt disable periods. It should return the current ++ vertical blank event count, which is often tracked in a device ++ register. 
The enable and disable vblank callbacks should enable ++ and disable vertical blank interrupts, respectively. In the ++ absence of DRM clients waiting on vblank events, the core DRM ++ code will use the disable_vblank() function to disable ++ interrupts, which saves power. They'll be re-enabled again when ++ a client calls the vblank wait ioctl above. ++ ++ ++ Devices that don't provide a count register can simply use an ++ internal atomic counter incremented on every vertical blank ++ interrupt, and can make their enable and disable vblank ++ functions into no-ops. ++ ++ ++ ++ ++ Memory management ++ ++ The memory manager lies at the heart of many DRM operations, and ++ is also required to support advanced client features like OpenGL ++ pbuffers. The DRM currently contains two memory managers, TTM ++ and GEM. ++ ++ ++ ++ The Translation Table Manager (TTM) ++ ++ TTM was developed by Tungsten Graphics, primarily by Thomas ++ Hellström, and is intended to be a flexible, high performance ++ graphics memory manager. ++ ++ ++ Drivers wishing to support TTM must fill out a drm_bo_driver ++ structure. ++ ++ ++ TTM design background and information belongs here. ++ ++ ++ ++ ++ The Graphics Execution Manager (GEM) ++ ++ GEM is an Intel project, authored by Eric Anholt and Keith ++ Packard. It provides simpler interfaces than TTM, and is well ++ suited for UMA devices. ++ ++ ++ GEM-enabled drivers must provide gem_init_object() and ++ gem_free_object() callbacks to support the core memory ++ allocation routines. They should also provide several driver ++ specific ioctls to support command execution, pinning, buffer ++ read & write, mapping, and domain ownership transfers. ++ ++ ++ On a fundamental level, GEM involves several operations: memory ++ allocation and freeing, command execution, and aperture management ++ at command execution time. 
Buffer object allocation is relatively ++ straightforward and largely provided by Linux's shmem layer, which ++ provides memory to back each object. When mapped into the GTT ++ or used in a command buffer, the backing pages for an object are ++ flushed to memory and marked write combined so as to be coherent ++ with the GPU. Likewise, when the GPU finishes rendering to an object, ++ if the CPU accesses it, it must be made coherent with the CPU's view ++ of memory, usually involving GPU cache flushing of various kinds. ++ This core CPU<->GPU coherency management is provided by the GEM ++ set domain function, which evaluates an object's current domain and ++ performs any necessary flushing or synchronization to put the object ++ into the desired coherency domain (note that the object may be busy, ++ i.e. an active render target; in that case the set domain function ++ will block the client and wait for rendering to complete before ++ performing any necessary flushing operations). ++ ++ ++ Perhaps the most important GEM function is providing a command ++ execution interface to clients. Client programs construct command ++ buffers containing references to previously allocated memory objects ++ and submit them to GEM. At that point, GEM will take care to bind ++ all the objects into the GTT, execute the buffer, and provide ++ necessary synchronization between clients accessing the same buffers. ++ This often involves evicting some objects from the GTT and re-binding ++ others (a fairly expensive operation), and providing relocation ++ support which hides fixed GTT offsets from clients. Clients must ++ take care not to submit command buffers that reference more objects ++ than can fit in the GTT or GEM will reject them and no rendering ++ will occur. Similarly, if several objects in the buffer require ++ fence registers to be allocated for correct rendering (e.g. 
2D blits ++ on pre-965 chips), care must be taken not to require more fence ++ registers than are available to the client. Such resource management ++ should be abstracted from the client in libdrm. ++ ++ ++ ++ ++ ++ ++ ++ Output management ++ ++ At the core of the DRM output management code is a set of ++ structures representing CRTCs, encoders and connectors. ++ ++ ++ A CRTC is an abstraction representing a part of the chip that ++ contains a pointer to a scanout buffer. Therefore, the number ++ of CRTCs available determines how many independent scanout ++ buffers can be active at any given time. The CRTC structure ++ contains several fields to support this: a pointer to some video ++ memory, a display mode, and an (x, y) offset into the video ++ memory to support panning or configurations where one piece of ++ video memory spans multiple CRTCs. ++ ++ ++ An encoder takes pixel data from a CRTC and converts it to a ++ format suitable for any attached connectors. On some devices, ++ it may be possible to have a CRTC send data to more than one ++ encoder. In that case, both encoders would receive data from ++ the same scanout buffer, resulting in a "cloned" display ++ configuration across the connectors attached to each encoder. ++ ++ ++ A connector is the final destination for pixel data on a device, ++ and usually connects directly to an external display device like ++ a monitor or laptop panel. A connector can only be attached to ++ one encoder at a time. The connector is also the structure ++ where information about the attached display is kept, so it ++ contains fields for display data, EDID data, DPMS & ++ connection status, and information about modes supported on the ++ attached displays. 
++ ++ ++ ++ ++ ++ Framebuffer management ++ ++ In order to set a mode on a given CRTC, encoder and connector ++ configuration, clients need to provide a framebuffer object which ++ will provide a source of pixels for the CRTC to deliver to the encoder(s) ++ and ultimately the connector(s) in the configuration. A framebuffer ++ is fundamentally a driver specific memory object, made into an opaque ++ handle by the DRM addfb function. Once an fb has been created this ++ way it can be passed to the KMS mode setting routines for use in ++ a configuration. ++ ++ ++ ++ ++ Command submission & fencing ++ ++ This should cover a few device specific command submission ++ implementations. ++ ++ ++ ++ ++ Suspend/resume ++ ++ The DRM core provides some suspend/resume code, but drivers ++ wanting full suspend/resume support should provide save() and ++ restore() functions. These will be called at suspend, ++ hibernate, or resume time, and should perform any state save or ++ restore required by your device across suspend or hibernate ++ states. ++ ++ ++ ++ ++ DMA services ++ ++ This should cover how DMA mapping etc. is supported by the core. ++ These functions are deprecated and should not be used. ++ ++ ++ ++ ++ ++ ++ ++ Userland interfaces ++ ++ The DRM core exports several interfaces to applications, ++ generally intended to be used through corresponding libdrm ++ wrapper functions. In addition, drivers export device specific ++ interfaces for use by userspace drivers & device aware ++ applications through ioctls and sysfs files. ++ ++ ++ External interfaces include: memory mapping, context management, ++ DMA operations, AGP management, vblank control, fence ++ management, memory management, and output management. ++ ++ ++ Cover generic ioctls and sysfs layout here. Only need high ++ level info, since man pages will cover the rest. ++ ++ ++ ++ ++ ++ ++ DRM Driver API ++ ++ Include auto-generated API reference here (need to reference it ++ from paragraphs above too). 
++ ++ ++ ++ +diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h +index 634c40a..d92d63a 100644 +--- a/arch/x86/include/asm/cacheflush.h ++++ b/arch/x86/include/asm/cacheflush.h +@@ -139,9 +139,11 @@ int set_memory_np(unsigned long addr, int numpages); + int set_memory_4k(unsigned long addr, int numpages); + + int set_memory_array_uc(unsigned long *addr, int addrinarray); ++int set_memory_array_wc(unsigned long *addr, int addrinarray); + int set_memory_array_wb(unsigned long *addr, int addrinarray); + + int set_pages_array_uc(struct page **pages, int addrinarray); ++int set_pages_array_wc(struct page **pages, int addrinarray); + int set_pages_array_wb(struct page **pages, int addrinarray); + + /* +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index 28195c3..532e793 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -997,7 +997,8 @@ out_err: + } + EXPORT_SYMBOL(set_memory_uc); + +-int set_memory_array_uc(unsigned long *addr, int addrinarray) ++int _set_memory_array(unsigned long *addr, int addrinarray, ++ unsigned long new_type) + { + int i, j; + int ret; +@@ -1007,13 +1008,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray) + */ + for (i = 0; i < addrinarray; i++) { + ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, +- _PAGE_CACHE_UC_MINUS, NULL); ++ new_type, NULL); + if (ret) + goto out_free; + } + + ret = change_page_attr_set(addr, addrinarray, + __pgprot(_PAGE_CACHE_UC_MINUS), 1); ++ ++ if (!ret && new_type == _PAGE_CACHE_WC) ++ ret = change_page_attr_set_clr(addr, addrinarray, ++ __pgprot(_PAGE_CACHE_WC), ++ __pgprot(_PAGE_CACHE_MASK), ++ 0, CPA_ARRAY, NULL); + if (ret) + goto out_free; + +@@ -1025,8 +1032,19 @@ out_free: + + return ret; + } ++ ++int set_memory_array_uc(unsigned long *addr, int addrinarray) ++{ ++ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS); ++} + EXPORT_SYMBOL(set_memory_array_uc); + ++int set_memory_array_wc(unsigned long 
*addr, int addrinarray) ++{ ++ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC); ++} ++EXPORT_SYMBOL(set_memory_array_wc); ++ + int _set_memory_wc(unsigned long addr, int numpages) + { + int ret; +@@ -1153,26 +1171,34 @@ int set_pages_uc(struct page *page, int numpages) + } + EXPORT_SYMBOL(set_pages_uc); + +-int set_pages_array_uc(struct page **pages, int addrinarray) ++static int _set_pages_array(struct page **pages, int addrinarray, ++ unsigned long new_type) + { + unsigned long start; + unsigned long end; + int i; + int free_idx; ++ int ret; + + for (i = 0; i < addrinarray; i++) { + if (PageHighMem(pages[i])) + continue; + start = page_to_pfn(pages[i]) << PAGE_SHIFT; + end = start + PAGE_SIZE; +- if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL)) ++ if (reserve_memtype(start, end, new_type, NULL)) + goto err_out; + } + +- if (cpa_set_pages_array(pages, addrinarray, +- __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) { +- return 0; /* Success */ +- } ++ ret = cpa_set_pages_array(pages, addrinarray, ++ __pgprot(_PAGE_CACHE_UC_MINUS)); ++ if (!ret && new_type == _PAGE_CACHE_WC) ++ ret = change_page_attr_set_clr(NULL, addrinarray, ++ __pgprot(_PAGE_CACHE_WC), ++ __pgprot(_PAGE_CACHE_MASK), ++ 0, CPA_PAGES_ARRAY, pages); ++ if (ret) ++ goto err_out; ++ return 0; /* Success */ + err_out: + free_idx = i; + for (i = 0; i < free_idx; i++) { +@@ -1184,8 +1210,19 @@ err_out: + } + return -EINVAL; + } ++ ++int set_pages_array_uc(struct page **pages, int addrinarray) ++{ ++ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS); ++} + EXPORT_SYMBOL(set_pages_array_uc); + ++int set_pages_array_wc(struct page **pages, int addrinarray) ++{ ++ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC); ++} ++EXPORT_SYMBOL(set_pages_array_wc); ++ + int set_pages_wb(struct page *page, int numpages) + { + unsigned long addr = (unsigned long)page_address(page); +diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h +index 870f12c..1204909 100644 +--- 
a/drivers/char/agp/agp.h ++++ b/drivers/char/agp/agp.h +@@ -178,86 +178,6 @@ struct agp_bridge_data { + #define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page) + + +-/* Intel registers */ +-#define INTEL_APSIZE 0xb4 +-#define INTEL_ATTBASE 0xb8 +-#define INTEL_AGPCTRL 0xb0 +-#define INTEL_NBXCFG 0x50 +-#define INTEL_ERRSTS 0x91 +- +-/* Intel i830 registers */ +-#define I830_GMCH_CTRL 0x52 +-#define I830_GMCH_ENABLED 0x4 +-#define I830_GMCH_MEM_MASK 0x1 +-#define I830_GMCH_MEM_64M 0x1 +-#define I830_GMCH_MEM_128M 0 +-#define I830_GMCH_GMS_MASK 0x70 +-#define I830_GMCH_GMS_DISABLED 0x00 +-#define I830_GMCH_GMS_LOCAL 0x10 +-#define I830_GMCH_GMS_STOLEN_512 0x20 +-#define I830_GMCH_GMS_STOLEN_1024 0x30 +-#define I830_GMCH_GMS_STOLEN_8192 0x40 +-#define I830_RDRAM_CHANNEL_TYPE 0x03010 +-#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) +-#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) +- +-/* This one is for I830MP w. an external graphic card */ +-#define INTEL_I830_ERRSTS 0x92 +- +-/* Intel 855GM/852GM registers */ +-#define I855_GMCH_GMS_MASK 0xF0 +-#define I855_GMCH_GMS_STOLEN_0M 0x0 +-#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) +-#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) +-#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) +-#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) +-#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) +-#define I85X_CAPID 0x44 +-#define I85X_VARIANT_MASK 0x7 +-#define I85X_VARIANT_SHIFT 5 +-#define I855_GME 0x0 +-#define I855_GM 0x4 +-#define I852_GME 0x2 +-#define I852_GM 0x5 +- +-/* Intel i845 registers */ +-#define INTEL_I845_AGPM 0x51 +-#define INTEL_I845_ERRSTS 0xc8 +- +-/* Intel i860 registers */ +-#define INTEL_I860_MCHCFG 0x50 +-#define INTEL_I860_ERRSTS 0xc8 +- +-/* Intel i810 registers */ +-#define I810_GMADDR 0x10 +-#define I810_MMADDR 0x14 +-#define I810_PTE_BASE 0x10000 +-#define I810_PTE_MAIN_UNCACHED 0x00000000 +-#define I810_PTE_LOCAL 0x00000002 +-#define I810_PTE_VALID 0x00000001 +-#define I830_PTE_SYSTEM_CACHED 0x00000006 +-#define 
I810_SMRAM_MISCC 0x70 +-#define I810_GFX_MEM_WIN_SIZE 0x00010000 +-#define I810_GFX_MEM_WIN_32M 0x00010000 +-#define I810_GMS 0x000000c0 +-#define I810_GMS_DISABLE 0x00000000 +-#define I810_PGETBL_CTL 0x2020 +-#define I810_PGETBL_ENABLED 0x00000001 +-#define I965_PGETBL_SIZE_MASK 0x0000000e +-#define I965_PGETBL_SIZE_512KB (0 << 1) +-#define I965_PGETBL_SIZE_256KB (1 << 1) +-#define I965_PGETBL_SIZE_128KB (2 << 1) +-#define I965_PGETBL_SIZE_1MB (3 << 1) +-#define I965_PGETBL_SIZE_2MB (4 << 1) +-#define I965_PGETBL_SIZE_1_5MB (5 << 1) +-#define G33_PGETBL_SIZE_MASK (3 << 8) +-#define G33_PGETBL_SIZE_1M (1 << 8) +-#define G33_PGETBL_SIZE_2M (2 << 8) +- +-#define I810_DRAM_CTL 0x3000 +-#define I810_DRAM_ROW_0 0x00000001 +-#define I810_DRAM_ROW_0_SDRAM 0x00000001 +- + struct agp_device_ids { + unsigned short device_id; /* first, to make table easier to read */ + enum chipset_type chipset; +diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c +index d2ce68f..fd79351 100644 +--- a/drivers/char/agp/ali-agp.c ++++ b/drivers/char/agp/ali-agp.c +@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = { + .aperture_sizes = ali_generic_sizes, + .size_type = U32_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = ali_configure, + .fetch_size = ali_fetch_size, + .cleanup = ali_cleanup, +diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c +index a7637d7..b6b1568 100644 +--- a/drivers/char/agp/amd-k7-agp.c ++++ b/drivers/char/agp/amd-k7-agp.c +@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) + { + struct aper_size_info_lvl2 *value; + struct amd_page_map page_dir; ++ unsigned long __iomem *cur_gatt; + unsigned long addr; + int retval; + u32 temp; +@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) + readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. 
*/ + } + ++ for (i = 0; i < value->num_entries; i++) { ++ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; ++ cur_gatt = GET_GATT(addr); ++ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); ++ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ ++ } ++ + return 0; + } + +@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = { + .aperture_sizes = amd_irongate_sizes, + .size_type = LVL2_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = amd_irongate_configure, + .fetch_size = amd_irongate_fetch_size, + .cleanup = amd_irongate_cleanup, +diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c +index fd50ead..70312da 100644 +--- a/drivers/char/agp/amd64-agp.c ++++ b/drivers/char/agp/amd64-agp.c +@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = { + .aperture_sizes = amd_8151_sizes, + .size_type = U32_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = amd_8151_configure, + .fetch_size = amd64_fetch_size, + .cleanup = amd64_cleanup, +@@ -383,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) + { + u32 httfea,baseaddr,enuscr; + struct pci_dev *dev1; +- int i; ++ int i, ret; + unsigned size = amd64_fetch_size(); + + dev_info(&pdev->dev, "setting up ULi AGP\n"); +@@ -399,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) + + if (i == ARRAY_SIZE(uli_sizes)) { + dev_info(&pdev->dev, "no ULi size found for %d\n", size); +- return -ENODEV; ++ ret = -ENODEV; ++ goto put; + } + + /* shadow x86-64 registers into ULi registers */ + pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); + + /* if x86-64 aperture base is beyond 4G, exit here */ +- if ((httfea & 0x7fff) >> (32 - 25)) +- return -ENODEV; ++ if ((httfea & 0x7fff) >> (32 - 25)) { ++ ret = -ENODEV; ++ goto put; ++ } + + httfea = (httfea& 0x7fff) << 25; + +@@ -419,9 +423,10 @@ static int __devinit uli_agp_init(struct 
pci_dev *pdev) + enuscr= httfea+ (size * 1024 * 1024) - 1; + pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); + pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); +- ++ ret = 0; ++put: + pci_dev_put(dev1); +- return 0; ++ return ret; + } + + +@@ -440,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) + { + u32 tmp, apbase, apbar, aplimit; + struct pci_dev *dev1; +- int i; ++ int i, ret; + unsigned size = amd64_fetch_size(); + + dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); +@@ -457,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) + + if (i == ARRAY_SIZE(nforce3_sizes)) { + dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); +- return -ENODEV; ++ ret = -ENODEV; ++ goto put; + } + + pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); +@@ -471,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) + /* if x86-64 aperture base is beyond 4G, exit here */ + if ( (apbase & 0x7fff) >> (32 - 25) ) { + dev_info(&pdev->dev, "aperture base > 4G\n"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto put; + } + + apbase = (apbase & 0x7fff) << 25; +@@ -487,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev) + pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); + pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); + ++ ret = 0; ++put: + pci_dev_put(dev1); + +- return 0; ++ return ret; + } + + static int __devinit agp_amd64_probe(struct pci_dev *pdev, +@@ -499,6 +508,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, + u8 cap_ptr; + int err; + ++ /* The Highlander principle */ ++ if (agp_bridges_found) ++ return -ENODEV; ++ + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + if (!cap_ptr) + return -ENODEV; +@@ -562,6 +575,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev) + amd64_aperture_sizes[bridge->aperture_size_idx].size); + agp_remove_bridge(bridge); + agp_put_bridge(bridge); ++ ++ agp_bridges_found--; + } + + #ifdef CONFIG_PM +@@ -709,6 
+724,11 @@ static struct pci_device_id agp_amd64_pci_table[] = { + + MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); + ++static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = { ++ { PCI_DEVICE_CLASS(0, 0) }, ++ { } ++}; ++ + static struct pci_driver agp_amd64_pci_driver = { + .name = "agpgart-amd64", + .id_table = agp_amd64_pci_table, +@@ -734,7 +754,6 @@ int __init agp_amd64_init(void) + return err; + + if (agp_bridges_found == 0) { +- struct pci_dev *dev; + if (!agp_try_unsupported && !agp_try_unsupported_boot) { + printk(KERN_INFO PFX "No supported AGP bridge found.\n"); + #ifdef MODULE +@@ -750,17 +769,10 @@ int __init agp_amd64_init(void) + return -ENODEV; + + /* Look for any AGP bridge */ +- dev = NULL; +- err = -ENODEV; +- for_each_pci_dev(dev) { +- if (!pci_find_capability(dev, PCI_CAP_ID_AGP)) +- continue; +- /* Only one bridge supported right now */ +- if (agp_amd64_probe(dev, NULL) == 0) { +- err = 0; +- break; +- } +- } ++ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; ++ err = driver_attach(&agp_amd64_pci_driver.driver); ++ if (err == 0 && agp_bridges_found == 0) ++ err = -ENODEV; + } + return err; + } +diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c +index 3b2ecbe..dc30e22 100644 +--- a/drivers/char/agp/ati-agp.c ++++ b/drivers/char/agp/ati-agp.c +@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) + { + struct aper_size_info_lvl2 *value; + struct ati_page_map page_dir; ++ unsigned long __iomem *cur_gatt; + unsigned long addr; + int retval; + u32 temp; +@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) + readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. 
*/ + } + ++ for (i = 0; i < value->num_entries; i++) { ++ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; ++ cur_gatt = GET_GATT(addr); ++ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); ++ } ++ + return 0; + } + +@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = { + .aperture_sizes = ati_generic_sizes, + .size_type = LVL2_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = ati_configure, + .fetch_size = ati_fetch_size, + .cleanup = ati_cleanup, +diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c +index 793f39e..aa109cb 100644 +--- a/drivers/char/agp/efficeon-agp.c ++++ b/drivers/char/agp/efficeon-agp.c +@@ -28,6 +28,7 @@ + #include + #include + #include "agp.h" ++#include "intel-agp.h" + + /* + * The real differences to the generic AGP code is +diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c +index aa4248e..d836a71 100644 +--- a/drivers/char/agp/intel-agp.c ++++ b/drivers/char/agp/intel-agp.c +@@ -11,1531 +11,13 @@ + #include + #include + #include "agp.h" ++#include "intel-agp.h" ++ ++#include "intel-gtt.c" + + int intel_agp_enabled; + EXPORT_SYMBOL(intel_agp_enabled); + +-/* +- * If we have Intel graphics, we're not going to have anything other than +- * an Intel IOMMU. So make the correct use of the PCI DMA API contingent +- * on the Intel IOMMU support (CONFIG_DMAR). +- * Only newer chipsets need to bother with this, of course. 
+- */ +-#ifdef CONFIG_DMAR +-#define USE_PCI_DMA_API 1 +-#endif +- +-#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 +-#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a +-#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 +-#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 +-#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 +-#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 +-#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 +-#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 +-#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 +-#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 +-#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 +-#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 +-#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 +-#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 +-#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC +-#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE +-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 +-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 +-#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 +-#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 +-#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 +-#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 +-#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 +-#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 +-#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 +-#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 +-#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 +-#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 +-#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 +-#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 +-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 +-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 +-#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 +-#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 +-#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 +-#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 +-#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 +-#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 +-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 +-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 +-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 +-#define 
PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 +-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a +-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 +-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 +-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 +-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 +-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 +- +-/* cover 915 and 945 variants */ +-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) +- +-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) +- +-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) +- +-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) +- +-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) +- +-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB 
|| \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ +- IS_SNB) +- +-extern int agp_memory_reserved; +- +- +-/* Intel 815 register */ +-#define INTEL_815_APCONT 0x51 +-#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF +- +-/* Intel i820 registers */ +-#define INTEL_I820_RDCR 0x51 +-#define INTEL_I820_ERRSTS 0xc8 +- +-/* Intel i840 registers */ +-#define INTEL_I840_MCHCFG 0x50 +-#define INTEL_I840_ERRSTS 0xc8 +- +-/* Intel i850 registers */ +-#define INTEL_I850_MCHCFG 0x50 +-#define INTEL_I850_ERRSTS 0xc8 +- +-/* intel 915G registers */ +-#define I915_GMADDR 0x18 +-#define I915_MMADDR 0x10 +-#define I915_PTEADDR 0x1C +-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) +-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) +-#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) +-#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) +-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) +-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) +-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) +-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) +- +-#define I915_IFPADDR 0x60 +- +-/* Intel 965G registers */ +-#define I965_MSAC 0x62 +-#define I965_IFPADDR 0x70 +- +-/* Intel 7505 registers */ +-#define INTEL_I7505_APSIZE 0x74 +-#define INTEL_I7505_NCAPID 0x60 +-#define INTEL_I7505_NISTAT 0x6c +-#define INTEL_I7505_ATTBASE 0x78 +-#define INTEL_I7505_ERRSTS 0x42 +-#define INTEL_I7505_AGPCTRL 0x70 +-#define INTEL_I7505_MCHCFG 0x50 +- +-#define SNB_GMCH_CTRL 0x50 +-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 +-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) +-#define 
SNB_GMCH_GMS_STOLEN_64M (2 << 3) +-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) +-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) +-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) +-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) +-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) +-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) +-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) +-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) +-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) +-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) +-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) +-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) +-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) +-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) +-#define SNB_GTT_SIZE_0M (0 << 8) +-#define SNB_GTT_SIZE_1M (1 << 8) +-#define SNB_GTT_SIZE_2M (2 << 8) +-#define SNB_GTT_SIZE_MASK (3 << 8) +- +-static const struct aper_size_info_fixed intel_i810_sizes[] = +-{ +- {64, 16384, 4}, +- /* The 32M mode still requires a 64k gatt */ +- {32, 8192, 4} +-}; +- +-#define AGP_DCACHE_MEMORY 1 +-#define AGP_PHYS_MEMORY 2 +-#define INTEL_AGP_CACHED_MEMORY 3 +- +-static struct gatt_mask intel_i810_masks[] = +-{ +- {.mask = I810_PTE_VALID, .type = 0}, +- {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, +- {.mask = I810_PTE_VALID, .type = 0}, +- {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, +- .type = INTEL_AGP_CACHED_MEMORY} +-}; +- +-static struct _intel_private { +- struct pci_dev *pcidev; /* device one */ +- u8 __iomem *registers; +- u32 __iomem *gtt; /* I915G */ +- int num_dcache_entries; +- /* gtt_entries is the number of gtt entries that are already mapped +- * to stolen memory. Stolen memory is larger than the memory mapped +- * through gtt_entries, as it includes some reserved space for the BIOS +- * popup and for the GTT. 
+- */ +- int gtt_entries; /* i830+ */ +- int gtt_total_size; +- union { +- void __iomem *i9xx_flush_page; +- void *i8xx_flush_page; +- }; +- struct page *i8xx_page; +- struct resource ifp_resource; +- int resource_valid; +-} intel_private; +- +-#ifdef USE_PCI_DMA_API +-static int intel_agp_map_page(struct page *page, dma_addr_t *ret) +-{ +- *ret = pci_map_page(intel_private.pcidev, page, 0, +- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); +- if (pci_dma_mapping_error(intel_private.pcidev, *ret)) +- return -EINVAL; +- return 0; +-} +- +-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) +-{ +- pci_unmap_page(intel_private.pcidev, dma, +- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); +-} +- +-static void intel_agp_free_sglist(struct agp_memory *mem) +-{ +- struct sg_table st; +- +- st.sgl = mem->sg_list; +- st.orig_nents = st.nents = mem->page_count; +- +- sg_free_table(&st); +- +- mem->sg_list = NULL; +- mem->num_sg = 0; +-} +- +-static int intel_agp_map_memory(struct agp_memory *mem) +-{ +- struct sg_table st; +- struct scatterlist *sg; +- int i; +- +- DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); +- +- if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) +- return -ENOMEM; +- +- mem->sg_list = sg = st.sgl; +- +- for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) +- sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); +- +- mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, +- mem->page_count, PCI_DMA_BIDIRECTIONAL); +- if (unlikely(!mem->num_sg)) { +- intel_agp_free_sglist(mem); +- return -ENOMEM; +- } +- return 0; +-} +- +-static void intel_agp_unmap_memory(struct agp_memory *mem) +-{ +- DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); +- +- pci_unmap_sg(intel_private.pcidev, mem->sg_list, +- mem->page_count, PCI_DMA_BIDIRECTIONAL); +- intel_agp_free_sglist(mem); +-} +- +-static void intel_agp_insert_sg_entries(struct agp_memory *mem, +- off_t pg_start, int mask_type) +-{ +- struct scatterlist *sg; +- int i, j; +- +- j = 
pg_start; +- +- WARN_ON(!mem->num_sg); +- +- if (mem->num_sg == mem->page_count) { +- for_each_sg(mem->sg_list, sg, mem->page_count, i) { +- writel(agp_bridge->driver->mask_memory(agp_bridge, +- sg_dma_address(sg), mask_type), +- intel_private.gtt+j); +- j++; +- } +- } else { +- /* sg may merge pages, but we have to separate +- * per-page addr for GTT */ +- unsigned int len, m; +- +- for_each_sg(mem->sg_list, sg, mem->num_sg, i) { +- len = sg_dma_len(sg) / PAGE_SIZE; +- for (m = 0; m < len; m++) { +- writel(agp_bridge->driver->mask_memory(agp_bridge, +- sg_dma_address(sg) + m * PAGE_SIZE, +- mask_type), +- intel_private.gtt+j); +- j++; +- } +- } +- } +- readl(intel_private.gtt+j-1); +-} +- +-#else +- +-static void intel_agp_insert_sg_entries(struct agp_memory *mem, +- off_t pg_start, int mask_type) +-{ +- int i, j; +- u32 cache_bits = 0; +- +- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) +- { +- cache_bits = I830_PTE_SYSTEM_CACHED; +- } +- +- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { +- writel(agp_bridge->driver->mask_memory(agp_bridge, +- page_to_phys(mem->pages[i]), mask_type), +- intel_private.gtt+j); +- } +- +- readl(intel_private.gtt+j-1); +-} +- +-#endif +- +-static int intel_i810_fetch_size(void) +-{ +- u32 smram_miscc; +- struct aper_size_info_fixed *values; +- +- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); +- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); +- +- if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { +- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); +- return 0; +- } +- if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { +- agp_bridge->previous_size = +- agp_bridge->current_size = (void *) (values + 1); +- agp_bridge->aperture_size_idx = 1; +- return values[1].size; +- } else { +- agp_bridge->previous_size = +- agp_bridge->current_size = (void *) (values); +- 
agp_bridge->aperture_size_idx = 0; +- return values[0].size; +- } +- +- return 0; +-} +- +-static int intel_i810_configure(void) +-{ +- struct aper_size_info_fixed *current_size; +- u32 temp; +- int i; +- +- current_size = A_SIZE_FIX(agp_bridge->current_size); +- +- if (!intel_private.registers) { +- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); +- temp &= 0xfff80000; +- +- intel_private.registers = ioremap(temp, 128 * 4096); +- if (!intel_private.registers) { +- dev_err(&intel_private.pcidev->dev, +- "can't remap memory\n"); +- return -ENOMEM; +- } +- } +- +- if ((readl(intel_private.registers+I810_DRAM_CTL) +- & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { +- /* This will need to be dynamically assigned */ +- dev_info(&intel_private.pcidev->dev, +- "detected 4MB dedicated video ram\n"); +- intel_private.num_dcache_entries = 1024; +- } +- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); +- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); +- writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); +- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ +- +- if (agp_bridge->driver->needs_scratch_page) { +- for (i = 0; i < current_size->num_entries; i++) { +- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); +- } +- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */ +- } +- global_cache_flush(); +- return 0; +-} +- +-static void intel_i810_cleanup(void) +-{ +- writel(0, intel_private.registers+I810_PGETBL_CTL); +- readl(intel_private.registers); /* PCI Posting. 
*/ +- iounmap(intel_private.registers); +-} +- +-static void intel_i810_tlbflush(struct agp_memory *mem) +-{ +- return; +-} +- +-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) +-{ +- return; +-} +- +-/* Exists to support ARGB cursors */ +-static struct page *i8xx_alloc_pages(void) +-{ +- struct page *page; +- +- page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); +- if (page == NULL) +- return NULL; +- +- if (set_pages_uc(page, 4) < 0) { +- set_pages_wb(page, 4); +- __free_pages(page, 2); +- return NULL; +- } +- get_page(page); +- atomic_inc(&agp_bridge->current_memory_agp); +- return page; +-} +- +-static void i8xx_destroy_pages(struct page *page) +-{ +- if (page == NULL) +- return; +- +- set_pages_wb(page, 4); +- put_page(page); +- __free_pages(page, 2); +- atomic_dec(&agp_bridge->current_memory_agp); +-} +- +-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, +- int type) +-{ +- if (type < AGP_USER_TYPES) +- return type; +- else if (type == AGP_USER_CACHED_MEMORY) +- return INTEL_AGP_CACHED_MEMORY; +- else +- return 0; +-} +- +-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, +- int type) +-{ +- int i, j, num_entries; +- void *temp; +- int ret = -EINVAL; +- int mask_type; +- +- if (mem->page_count == 0) +- goto out; +- +- temp = agp_bridge->current_size; +- num_entries = A_SIZE_FIX(temp)->num_entries; +- +- if ((pg_start + mem->page_count) > num_entries) +- goto out_err; +- +- +- for (j = pg_start; j < (pg_start + mem->page_count); j++) { +- if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { +- ret = -EBUSY; +- goto out_err; +- } +- } +- +- if (type != mem->type) +- goto out_err; +- +- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); +- +- switch (mask_type) { +- case AGP_DCACHE_MEMORY: +- if (!mem->is_flushed) +- global_cache_flush(); +- for (i = pg_start; i < (pg_start + mem->page_count); i++) { +- writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, +- 
intel_private.registers+I810_PTE_BASE+(i*4)); +- } +- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); +- break; +- case AGP_PHYS_MEMORY: +- case AGP_NORMAL_MEMORY: +- if (!mem->is_flushed) +- global_cache_flush(); +- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { +- writel(agp_bridge->driver->mask_memory(agp_bridge, +- page_to_phys(mem->pages[i]), mask_type), +- intel_private.registers+I810_PTE_BASE+(j*4)); +- } +- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); +- break; +- default: +- goto out_err; +- } +- +- agp_bridge->driver->tlb_flush(mem); +-out: +- ret = 0; +-out_err: +- mem->is_flushed = true; +- return ret; +-} +- +-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, +- int type) +-{ +- int i; +- +- if (mem->page_count == 0) +- return 0; +- +- for (i = pg_start; i < (mem->page_count + pg_start); i++) { +- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); +- } +- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); +- +- agp_bridge->driver->tlb_flush(mem); +- return 0; +-} +- +-/* +- * The i810/i830 requires a physical address to program its mouse +- * pointer into hardware. +- * However the Xserver still writes to it through the agp aperture. 
+- */ +-static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) +-{ +- struct agp_memory *new; +- struct page *page; +- +- switch (pg_count) { +- case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); +- break; +- case 4: +- /* kludge to get 4 physical pages for ARGB cursor */ +- page = i8xx_alloc_pages(); +- break; +- default: +- return NULL; +- } +- +- if (page == NULL) +- return NULL; +- +- new = agp_create_memory(pg_count); +- if (new == NULL) +- return NULL; +- +- new->pages[0] = page; +- if (pg_count == 4) { +- /* kludge to get 4 physical pages for ARGB cursor */ +- new->pages[1] = new->pages[0] + 1; +- new->pages[2] = new->pages[1] + 1; +- new->pages[3] = new->pages[2] + 1; +- } +- new->page_count = pg_count; +- new->num_scratch_pages = pg_count; +- new->type = AGP_PHYS_MEMORY; +- new->physical = page_to_phys(new->pages[0]); +- return new; +-} +- +-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) +-{ +- struct agp_memory *new; +- +- if (type == AGP_DCACHE_MEMORY) { +- if (pg_count != intel_private.num_dcache_entries) +- return NULL; +- +- new = agp_create_memory(1); +- if (new == NULL) +- return NULL; +- +- new->type = AGP_DCACHE_MEMORY; +- new->page_count = pg_count; +- new->num_scratch_pages = 0; +- agp_free_page_array(new); +- return new; +- } +- if (type == AGP_PHYS_MEMORY) +- return alloc_agpphysmem_i8xx(pg_count, type); +- return NULL; +-} +- +-static void intel_i810_free_by_type(struct agp_memory *curr) +-{ +- agp_free_key(curr->key); +- if (curr->type == AGP_PHYS_MEMORY) { +- if (curr->page_count == 4) +- i8xx_destroy_pages(curr->pages[0]); +- else { +- agp_bridge->driver->agp_destroy_page(curr->pages[0], +- AGP_PAGE_DESTROY_UNMAP); +- agp_bridge->driver->agp_destroy_page(curr->pages[0], +- AGP_PAGE_DESTROY_FREE); +- } +- agp_free_page_array(curr); +- } +- kfree(curr); +-} +- +-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, +- dma_addr_t addr, int type) +-{ +- /* 
Type checking must be done elsewhere */ +- return addr | bridge->driver->masks[type].mask; +-} +- +-static struct aper_size_info_fixed intel_i830_sizes[] = +-{ +- {128, 32768, 5}, +- /* The 64M mode still requires a 128k gatt */ +- {64, 16384, 5}, +- {256, 65536, 6}, +- {512, 131072, 7}, +-}; +- +-static void intel_i830_init_gtt_entries(void) +-{ +- u16 gmch_ctrl; +- int gtt_entries = 0; +- u8 rdct; +- int local = 0; +- static const int ddt[4] = { 0, 16, 32, 64 }; +- int size; /* reserved space (in kb) at the top of stolen memory */ +- +- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); +- +- if (IS_I965) { +- u32 pgetbl_ctl; +- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); +- +- /* The 965 has a field telling us the size of the GTT, +- * which may be larger than what is necessary to map the +- * aperture. +- */ +- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { +- case I965_PGETBL_SIZE_128KB: +- size = 128; +- break; +- case I965_PGETBL_SIZE_256KB: +- size = 256; +- break; +- case I965_PGETBL_SIZE_512KB: +- size = 512; +- break; +- case I965_PGETBL_SIZE_1MB: +- size = 1024; +- break; +- case I965_PGETBL_SIZE_2MB: +- size = 2048; +- break; +- case I965_PGETBL_SIZE_1_5MB: +- size = 1024 + 512; +- break; +- default: +- dev_info(&intel_private.pcidev->dev, +- "unknown page table size, assuming 512KB\n"); +- size = 512; +- } +- size += 4; /* add in BIOS popup space */ +- } else if (IS_G33 && !IS_PINEVIEW) { +- /* G33's GTT size defined in gmch_ctrl */ +- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { +- case G33_PGETBL_SIZE_1M: +- size = 1024; +- break; +- case G33_PGETBL_SIZE_2M: +- size = 2048; +- break; +- default: +- dev_info(&agp_bridge->dev->dev, +- "unknown page table size 0x%x, assuming 512KB\n", +- (gmch_ctrl & G33_PGETBL_SIZE_MASK)); +- size = 512; +- } +- size += 4; +- } else if (IS_G4X || IS_PINEVIEW) { +- /* On 4 series hardware, GTT stolen is separate from graphics +- * stolen, ignore it in stolen gtt entries counting. 
However, +- * 4KB of the stolen memory doesn't get mapped to the GTT. +- */ +- size = 4; +- } else { +- /* On previous hardware, the GTT size was just what was +- * required to map the aperture. +- */ +- size = agp_bridge->driver->fetch_size() + 4; +- } +- +- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { +- switch (gmch_ctrl & I830_GMCH_GMS_MASK) { +- case I830_GMCH_GMS_STOLEN_512: +- gtt_entries = KB(512) - KB(size); +- break; +- case I830_GMCH_GMS_STOLEN_1024: +- gtt_entries = MB(1) - KB(size); +- break; +- case I830_GMCH_GMS_STOLEN_8192: +- gtt_entries = MB(8) - KB(size); +- break; +- case I830_GMCH_GMS_LOCAL: +- rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); +- gtt_entries = (I830_RDRAM_ND(rdct) + 1) * +- MB(ddt[I830_RDRAM_DDT(rdct)]); +- local = 1; +- break; +- default: +- gtt_entries = 0; +- break; +- } +- } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { +- /* +- * SandyBridge has new memory control reg at 0x50.w +- */ +- u16 snb_gmch_ctl; +- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); +- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { +- case SNB_GMCH_GMS_STOLEN_32M: +- gtt_entries = MB(32) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_64M: +- gtt_entries = MB(64) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_96M: +- gtt_entries = MB(96) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_128M: +- gtt_entries = MB(128) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_160M: +- gtt_entries = MB(160) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_192M: +- gtt_entries = MB(192) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_224M: +- gtt_entries = MB(224) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_256M: +- gtt_entries = MB(256) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_288M: +- gtt_entries = MB(288) - KB(size); +- 
break; +- case SNB_GMCH_GMS_STOLEN_320M: +- gtt_entries = MB(320) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_352M: +- gtt_entries = MB(352) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_384M: +- gtt_entries = MB(384) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_416M: +- gtt_entries = MB(416) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_448M: +- gtt_entries = MB(448) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_480M: +- gtt_entries = MB(480) - KB(size); +- break; +- case SNB_GMCH_GMS_STOLEN_512M: +- gtt_entries = MB(512) - KB(size); +- break; +- } +- } else { +- switch (gmch_ctrl & I855_GMCH_GMS_MASK) { +- case I855_GMCH_GMS_STOLEN_1M: +- gtt_entries = MB(1) - KB(size); +- break; +- case I855_GMCH_GMS_STOLEN_4M: +- gtt_entries = MB(4) - KB(size); +- break; +- case I855_GMCH_GMS_STOLEN_8M: +- gtt_entries = MB(8) - KB(size); +- break; +- case I855_GMCH_GMS_STOLEN_16M: +- gtt_entries = MB(16) - KB(size); +- break; +- case I855_GMCH_GMS_STOLEN_32M: +- gtt_entries = MB(32) - KB(size); +- break; +- case I915_GMCH_GMS_STOLEN_48M: +- /* Check it's really I915G */ +- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) +- gtt_entries = MB(48) - KB(size); +- else +- gtt_entries = 0; +- break; +- case I915_GMCH_GMS_STOLEN_64M: +- /* Check it's really I915G */ +- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) +- gtt_entries = MB(64) - KB(size); +- else +- gtt_entries = 0; +- break; +- case G33_GMCH_GMS_STOLEN_128M: +- if (IS_G33 || IS_I965 || IS_G4X) +- gtt_entries = MB(128) - KB(size); +- else +- gtt_entries = 0; +- break; +- case G33_GMCH_GMS_STOLEN_256M: +- if (IS_G33 || IS_I965 || IS_G4X) +- gtt_entries = MB(256) - KB(size); +- else +- gtt_entries = 0; +- break; +- case INTEL_GMCH_GMS_STOLEN_96M: +- if (IS_I965 || IS_G4X) +- gtt_entries = MB(96) - KB(size); +- else +- gtt_entries = 0; +- break; +- case INTEL_GMCH_GMS_STOLEN_160M: +- if (IS_I965 || IS_G4X) +- gtt_entries = MB(160) - KB(size); +- else +- gtt_entries = 0; +- break; +- case 
INTEL_GMCH_GMS_STOLEN_224M: +- if (IS_I965 || IS_G4X) +- gtt_entries = MB(224) - KB(size); +- else +- gtt_entries = 0; +- break; +- case INTEL_GMCH_GMS_STOLEN_352M: +- if (IS_I965 || IS_G4X) +- gtt_entries = MB(352) - KB(size); +- else +- gtt_entries = 0; +- break; +- default: +- gtt_entries = 0; +- break; +- } +- } +- if (gtt_entries > 0) { +- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", +- gtt_entries / KB(1), local ? "local" : "stolen"); +- gtt_entries /= KB(4); +- } else { +- dev_info(&agp_bridge->dev->dev, +- "no pre-allocated video memory detected\n"); +- gtt_entries = 0; +- } +- +- intel_private.gtt_entries = gtt_entries; +-} +- +-static void intel_i830_fini_flush(void) +-{ +- kunmap(intel_private.i8xx_page); +- intel_private.i8xx_flush_page = NULL; +- unmap_page_from_agp(intel_private.i8xx_page); +- +- __free_page(intel_private.i8xx_page); +- intel_private.i8xx_page = NULL; +-} +- +-static void intel_i830_setup_flush(void) +-{ +- /* return if we've already set the flush mechanism up */ +- if (intel_private.i8xx_page) +- return; +- +- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); +- if (!intel_private.i8xx_page) +- return; +- +- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); +- if (!intel_private.i8xx_flush_page) +- intel_i830_fini_flush(); +-} +- +-/* The chipset_flush interface needs to get data that has already been +- * flushed out of the CPU all the way out to main memory, because the GPU +- * doesn't snoop those buffers. +- * +- * The 8xx series doesn't have the same lovely interface for flushing the +- * chipset write buffers that the later chips do. According to the 865 +- * specs, it's 64 octwords, or 1KB. So, to get those previous things in +- * that buffer out, we just fill 1KB and clflush it out, on the assumption +- * that it'll push whatever was in there out. It appears to work. 
+- */ +-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) +-{ +- unsigned int *pg = intel_private.i8xx_flush_page; +- +- memset(pg, 0, 1024); +- +- if (cpu_has_clflush) +- clflush_cache_range(pg, 1024); +- else if (wbinvd_on_all_cpus() != 0) +- printk(KERN_ERR "Timed out waiting for cache flush.\n"); +-} +- +-/* The intel i830 automatically initializes the agp aperture during POST. +- * Use the memory already set aside for in the GTT. +- */ +-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) +-{ +- int page_order; +- struct aper_size_info_fixed *size; +- int num_entries; +- u32 temp; +- +- size = agp_bridge->current_size; +- page_order = size->page_order; +- num_entries = size->num_entries; +- agp_bridge->gatt_table_real = NULL; +- +- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); +- temp &= 0xfff80000; +- +- intel_private.registers = ioremap(temp, 128 * 4096); +- if (!intel_private.registers) +- return -ENOMEM; +- +- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; +- global_cache_flush(); /* FIXME: ?? */ +- +- /* we have to call this as early as possible after the MMIO base address is known */ +- intel_i830_init_gtt_entries(); +- +- agp_bridge->gatt_table = NULL; +- +- agp_bridge->gatt_bus_addr = temp; +- +- return 0; +-} +- +-/* Return the gatt table to a sane state. Use the top of stolen +- * memory for the GTT. 
+- */ +-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) +-{ +- return 0; +-} +- +-static int intel_i830_fetch_size(void) +-{ +- u16 gmch_ctrl; +- struct aper_size_info_fixed *values; +- +- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); +- +- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && +- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { +- /* 855GM/852GM/865G has 128MB aperture size */ +- agp_bridge->previous_size = agp_bridge->current_size = (void *) values; +- agp_bridge->aperture_size_idx = 0; +- return values[0].size; +- } +- +- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); +- +- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { +- agp_bridge->previous_size = agp_bridge->current_size = (void *) values; +- agp_bridge->aperture_size_idx = 0; +- return values[0].size; +- } else { +- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1); +- agp_bridge->aperture_size_idx = 1; +- return values[1].size; +- } +- +- return 0; +-} +- +-static int intel_i830_configure(void) +-{ +- struct aper_size_info_fixed *current_size; +- u32 temp; +- u16 gmch_ctrl; +- int i; +- +- current_size = A_SIZE_FIX(agp_bridge->current_size); +- +- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); +- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); +- +- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); +- gmch_ctrl |= I830_GMCH_ENABLED; +- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); +- +- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); +- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. 
*/ +- +- if (agp_bridge->driver->needs_scratch_page) { +- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { +- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); +- } +- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */ +- } +- +- global_cache_flush(); +- +- intel_i830_setup_flush(); +- return 0; +-} +- +-static void intel_i830_cleanup(void) +-{ +- iounmap(intel_private.registers); +-} +- +-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, +- int type) +-{ +- int i, j, num_entries; +- void *temp; +- int ret = -EINVAL; +- int mask_type; +- +- if (mem->page_count == 0) +- goto out; +- +- temp = agp_bridge->current_size; +- num_entries = A_SIZE_FIX(temp)->num_entries; +- +- if (pg_start < intel_private.gtt_entries) { +- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, +- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", +- pg_start, intel_private.gtt_entries); +- +- dev_info(&intel_private.pcidev->dev, +- "trying to insert into local/stolen memory\n"); +- goto out_err; +- } +- +- if ((pg_start + mem->page_count) > num_entries) +- goto out_err; +- +- /* The i830 can't check the GTT for entries since its read only, +- * depend on the caller to make the correct offset decisions. 
+- */ +- +- if (type != mem->type) +- goto out_err; +- +- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); +- +- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && +- mask_type != INTEL_AGP_CACHED_MEMORY) +- goto out_err; +- +- if (!mem->is_flushed) +- global_cache_flush(); +- +- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { +- writel(agp_bridge->driver->mask_memory(agp_bridge, +- page_to_phys(mem->pages[i]), mask_type), +- intel_private.registers+I810_PTE_BASE+(j*4)); +- } +- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); +- agp_bridge->driver->tlb_flush(mem); +- +-out: +- ret = 0; +-out_err: +- mem->is_flushed = true; +- return ret; +-} +- +-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, +- int type) +-{ +- int i; +- +- if (mem->page_count == 0) +- return 0; +- +- if (pg_start < intel_private.gtt_entries) { +- dev_info(&intel_private.pcidev->dev, +- "trying to disable local/stolen memory\n"); +- return -EINVAL; +- } +- +- for (i = pg_start; i < (mem->page_count + pg_start); i++) { +- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); +- } +- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); +- +- agp_bridge->driver->tlb_flush(mem); +- return 0; +-} +- +-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) +-{ +- if (type == AGP_PHYS_MEMORY) +- return alloc_agpphysmem_i8xx(pg_count, type); +- /* always return NULL for other allocation types for now */ +- return NULL; +-} +- +-static int intel_alloc_chipset_flush_resource(void) +-{ +- int ret; +- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, +- PAGE_SIZE, PCIBIOS_MIN_MEM, 0, +- pcibios_align_resource, agp_bridge->dev); +- +- return ret; +-} +- +-static void intel_i915_setup_chipset_flush(void) +-{ +- int ret; +- u32 temp; +- +- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); +- if (!(temp & 0x1)) { +- 
intel_alloc_chipset_flush_resource(); +- intel_private.resource_valid = 1; +- pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); +- } else { +- temp &= ~1; +- +- intel_private.resource_valid = 1; +- intel_private.ifp_resource.start = temp; +- intel_private.ifp_resource.end = temp + PAGE_SIZE; +- ret = request_resource(&iomem_resource, &intel_private.ifp_resource); +- /* some BIOSes reserve this area in a pnp some don't */ +- if (ret) +- intel_private.resource_valid = 0; +- } +-} +- +-static void intel_i965_g33_setup_chipset_flush(void) +-{ +- u32 temp_hi, temp_lo; +- int ret; +- +- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); +- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); +- +- if (!(temp_lo & 0x1)) { +- +- intel_alloc_chipset_flush_resource(); +- +- intel_private.resource_valid = 1; +- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, +- upper_32_bits(intel_private.ifp_resource.start)); +- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); +- } else { +- u64 l64; +- +- temp_lo &= ~0x1; +- l64 = ((u64)temp_hi << 32) | temp_lo; +- +- intel_private.resource_valid = 1; +- intel_private.ifp_resource.start = l64; +- intel_private.ifp_resource.end = l64 + PAGE_SIZE; +- ret = request_resource(&iomem_resource, &intel_private.ifp_resource); +- /* some BIOSes reserve this area in a pnp some don't */ +- if (ret) +- intel_private.resource_valid = 0; +- } +-} +- +-static void intel_i9xx_setup_flush(void) +-{ +- /* return if already configured */ +- if (intel_private.ifp_resource.start) +- return; +- +- if (IS_SNB) +- return; +- +- /* setup a resource for this object */ +- intel_private.ifp_resource.name = "Intel Flush Page"; +- intel_private.ifp_resource.flags = IORESOURCE_MEM; +- +- /* Setup chipset flush for 915 */ +- if (IS_I965 || IS_G33 || IS_G4X) { +- intel_i965_g33_setup_chipset_flush(); +- } else { +- 
intel_i915_setup_chipset_flush(); +- } +- +- if (intel_private.ifp_resource.start) { +- intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); +- if (!intel_private.i9xx_flush_page) +- dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); +- } +-} +- +-static int intel_i915_configure(void) +-{ +- struct aper_size_info_fixed *current_size; +- u32 temp; +- u16 gmch_ctrl; +- int i; +- +- current_size = A_SIZE_FIX(agp_bridge->current_size); +- +- pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); +- +- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); +- +- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); +- gmch_ctrl |= I830_GMCH_ENABLED; +- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); +- +- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); +- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ +- +- if (agp_bridge->driver->needs_scratch_page) { +- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { +- writel(agp_bridge->scratch_page, intel_private.gtt+i); +- } +- readl(intel_private.gtt+i-1); /* PCI Posting. 
*/ +- } +- +- global_cache_flush(); +- +- intel_i9xx_setup_flush(); +- +- return 0; +-} +- +-static void intel_i915_cleanup(void) +-{ +- if (intel_private.i9xx_flush_page) +- iounmap(intel_private.i9xx_flush_page); +- if (intel_private.resource_valid) +- release_resource(&intel_private.ifp_resource); +- intel_private.ifp_resource.start = 0; +- intel_private.resource_valid = 0; +- iounmap(intel_private.gtt); +- iounmap(intel_private.registers); +-} +- +-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) +-{ +- if (intel_private.i9xx_flush_page) +- writel(1, intel_private.i9xx_flush_page); +-} +- +-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, +- int type) +-{ +- int num_entries; +- void *temp; +- int ret = -EINVAL; +- int mask_type; +- +- if (mem->page_count == 0) +- goto out; +- +- temp = agp_bridge->current_size; +- num_entries = A_SIZE_FIX(temp)->num_entries; +- +- if (pg_start < intel_private.gtt_entries) { +- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, +- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", +- pg_start, intel_private.gtt_entries); +- +- dev_info(&intel_private.pcidev->dev, +- "trying to insert into local/stolen memory\n"); +- goto out_err; +- } +- +- if ((pg_start + mem->page_count) > num_entries) +- goto out_err; +- +- /* The i915 can't check the GTT for entries since it's read only; +- * depend on the caller to make the correct offset decisions. 
+- */ +- +- if (type != mem->type) +- goto out_err; +- +- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); +- +- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && +- mask_type != INTEL_AGP_CACHED_MEMORY) +- goto out_err; +- +- if (!mem->is_flushed) +- global_cache_flush(); +- +- intel_agp_insert_sg_entries(mem, pg_start, mask_type); +- agp_bridge->driver->tlb_flush(mem); +- +- out: +- ret = 0; +- out_err: +- mem->is_flushed = true; +- return ret; +-} +- +-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, +- int type) +-{ +- int i; +- +- if (mem->page_count == 0) +- return 0; +- +- if (pg_start < intel_private.gtt_entries) { +- dev_info(&intel_private.pcidev->dev, +- "trying to disable local/stolen memory\n"); +- return -EINVAL; +- } +- +- for (i = pg_start; i < (mem->page_count + pg_start); i++) +- writel(agp_bridge->scratch_page, intel_private.gtt+i); +- +- readl(intel_private.gtt+i-1); +- +- agp_bridge->driver->tlb_flush(mem); +- return 0; +-} +- +-/* Return the aperture size by just checking the resource length. The effect +- * described in the spec of the MSAC registers is just changing of the +- * resource size. +- */ +-static int intel_i9xx_fetch_size(void) +-{ +- int num_sizes = ARRAY_SIZE(intel_i830_sizes); +- int aper_size; /* size in megabytes */ +- int i; +- +- aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); +- +- for (i = 0; i < num_sizes; i++) { +- if (aper_size == intel_i830_sizes[i].size) { +- agp_bridge->current_size = intel_i830_sizes + i; +- agp_bridge->previous_size = agp_bridge->current_size; +- return aper_size; +- } +- } +- +- return 0; +-} +- +-/* The intel i915 automatically initializes the agp aperture during POST. +- * Use the memory already set aside for in the GTT. 
+- */ +-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) +-{ +- int page_order; +- struct aper_size_info_fixed *size; +- int num_entries; +- u32 temp, temp2; +- int gtt_map_size = 256 * 1024; +- +- size = agp_bridge->current_size; +- page_order = size->page_order; +- num_entries = size->num_entries; +- agp_bridge->gatt_table_real = NULL; +- +- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); +- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); +- +- if (IS_G33) +- gtt_map_size = 1024 * 1024; /* 1M on G33 */ +- intel_private.gtt = ioremap(temp2, gtt_map_size); +- if (!intel_private.gtt) +- return -ENOMEM; +- +- intel_private.gtt_total_size = gtt_map_size / 4; +- +- temp &= 0xfff80000; +- +- intel_private.registers = ioremap(temp, 128 * 4096); +- if (!intel_private.registers) { +- iounmap(intel_private.gtt); +- return -ENOMEM; +- } +- +- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; +- global_cache_flush(); /* FIXME: ? */ +- +- /* we have to call this as early as possible after the MMIO base address is known */ +- intel_i830_init_gtt_entries(); +- +- agp_bridge->gatt_table = NULL; +- +- agp_bridge->gatt_bus_addr = temp; +- +- return 0; +-} +- +-/* +- * The i965 supports 36-bit physical addresses, but to keep +- * the format of the GTT the same, the bits that don't fit +- * in a 32-bit word are shifted down to bits 4..7. +- * +- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" +- * is always zero on 32-bit architectures, so no need to make +- * this conditional. 
+- */ +-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, +- dma_addr_t addr, int type) +-{ +- /* Shift high bits down */ +- addr |= (addr >> 28) & 0xf0; +- +- /* Type checking must be done elsewhere */ +- return addr | bridge->driver->masks[type].mask; +-} +- +-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) +-{ +- u16 snb_gmch_ctl; +- +- switch (agp_bridge->dev->device) { +- case PCI_DEVICE_ID_INTEL_GM45_HB: +- case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: +- case PCI_DEVICE_ID_INTEL_Q45_HB: +- case PCI_DEVICE_ID_INTEL_G45_HB: +- case PCI_DEVICE_ID_INTEL_G41_HB: +- case PCI_DEVICE_ID_INTEL_B43_HB: +- case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: +- case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: +- case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: +- case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: +- *gtt_offset = *gtt_size = MB(2); +- break; +- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: +- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: +- *gtt_offset = MB(2); +- +- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); +- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { +- default: +- case SNB_GTT_SIZE_0M: +- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); +- *gtt_size = MB(0); +- break; +- case SNB_GTT_SIZE_1M: +- *gtt_size = MB(1); +- break; +- case SNB_GTT_SIZE_2M: +- *gtt_size = MB(2); +- break; +- } +- break; +- default: +- *gtt_offset = *gtt_size = KB(512); +- } +-} +- +-/* The intel i965 automatically initializes the agp aperture during POST. +- * Use the memory already set aside for in the GTT. 
+- */ +-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) +-{ +- int page_order; +- struct aper_size_info_fixed *size; +- int num_entries; +- u32 temp; +- int gtt_offset, gtt_size; +- +- size = agp_bridge->current_size; +- page_order = size->page_order; +- num_entries = size->num_entries; +- agp_bridge->gatt_table_real = NULL; +- +- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); +- +- temp &= 0xfff00000; +- +- intel_i965_get_gtt_range(>t_offset, >t_size); +- +- intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); +- +- if (!intel_private.gtt) +- return -ENOMEM; +- +- intel_private.gtt_total_size = gtt_size / 4; +- +- intel_private.registers = ioremap(temp, 128 * 4096); +- if (!intel_private.registers) { +- iounmap(intel_private.gtt); +- return -ENOMEM; +- } +- +- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; +- global_cache_flush(); /* FIXME: ? */ +- +- /* we have to call this as early as possible after the MMIO base address is known */ +- intel_i830_init_gtt_entries(); +- +- agp_bridge->gatt_table = NULL; +- +- agp_bridge->gatt_bus_addr = temp; +- +- return 0; +-} +- +- + static int intel_fetch_size(void) + { + int i; +@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = { + .aperture_sizes = intel_generic_sizes, + .size_type = U16_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = intel_configure, + .fetch_size = intel_fetch_size, + .cleanup = intel_cleanup, +@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = { + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + }; + +-static const struct agp_bridge_driver intel_810_driver = { +- .owner = THIS_MODULE, +- .aperture_sizes = intel_i810_sizes, +- .size_type = FIXED_APER_SIZE, +- .num_aperture_sizes = 2, +- .needs_scratch_page = true, +- .configure = intel_i810_configure, +- .fetch_size = intel_i810_fetch_size, +- .cleanup = intel_i810_cleanup, +- 
.tlb_flush = intel_i810_tlbflush, +- .mask_memory = intel_i810_mask_memory, +- .masks = intel_i810_masks, +- .agp_enable = intel_i810_agp_enable, +- .cache_flush = global_cache_flush, +- .create_gatt_table = agp_generic_create_gatt_table, +- .free_gatt_table = agp_generic_free_gatt_table, +- .insert_memory = intel_i810_insert_entries, +- .remove_memory = intel_i810_remove_entries, +- .alloc_by_type = intel_i810_alloc_by_type, +- .free_by_type = intel_i810_free_by_type, +- .agp_alloc_page = agp_generic_alloc_page, +- .agp_alloc_pages = agp_generic_alloc_pages, +- .agp_destroy_page = agp_generic_destroy_page, +- .agp_destroy_pages = agp_generic_destroy_pages, +- .agp_type_to_mask_type = agp_generic_type_to_mask_type, +-}; +- + static const struct agp_bridge_driver intel_815_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_815_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 2, ++ .needs_scratch_page = true, + .configure = intel_815_configure, + .fetch_size = intel_815_fetch_size, + .cleanup = intel_8xx_cleanup, +@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = { + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + }; + +-static const struct agp_bridge_driver intel_830_driver = { +- .owner = THIS_MODULE, +- .aperture_sizes = intel_i830_sizes, +- .size_type = FIXED_APER_SIZE, +- .num_aperture_sizes = 4, +- .needs_scratch_page = true, +- .configure = intel_i830_configure, +- .fetch_size = intel_i830_fetch_size, +- .cleanup = intel_i830_cleanup, +- .tlb_flush = intel_i810_tlbflush, +- .mask_memory = intel_i810_mask_memory, +- .masks = intel_i810_masks, +- .agp_enable = intel_i810_agp_enable, +- .cache_flush = global_cache_flush, +- .create_gatt_table = intel_i830_create_gatt_table, +- .free_gatt_table = intel_i830_free_gatt_table, +- .insert_memory = intel_i830_insert_entries, +- .remove_memory = intel_i830_remove_entries, +- .alloc_by_type = intel_i830_alloc_by_type, +- .free_by_type = intel_i810_free_by_type, 
+- .agp_alloc_page = agp_generic_alloc_page, +- .agp_alloc_pages = agp_generic_alloc_pages, +- .agp_destroy_page = agp_generic_destroy_page, +- .agp_destroy_pages = agp_generic_destroy_pages, +- .agp_type_to_mask_type = intel_i830_type_to_mask_type, +- .chipset_flush = intel_i830_chipset_flush, +-}; +- + static const struct agp_bridge_driver intel_820_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = intel_820_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_820_cleanup, +@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = { + .aperture_sizes = intel_830mp_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 4, ++ .needs_scratch_page = true, + .configure = intel_830mp_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, +@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = { + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = intel_840_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, +@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = { + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = intel_845_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, +@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = { + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = intel_850_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, +@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = { + .aperture_sizes = intel_8xx_sizes, + .size_type 
= U8_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = intel_860_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, +@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = { + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + }; + +-static const struct agp_bridge_driver intel_915_driver = { +- .owner = THIS_MODULE, +- .aperture_sizes = intel_i830_sizes, +- .size_type = FIXED_APER_SIZE, +- .num_aperture_sizes = 4, +- .needs_scratch_page = true, +- .configure = intel_i915_configure, +- .fetch_size = intel_i9xx_fetch_size, +- .cleanup = intel_i915_cleanup, +- .tlb_flush = intel_i810_tlbflush, +- .mask_memory = intel_i810_mask_memory, +- .masks = intel_i810_masks, +- .agp_enable = intel_i810_agp_enable, +- .cache_flush = global_cache_flush, +- .create_gatt_table = intel_i915_create_gatt_table, +- .free_gatt_table = intel_i830_free_gatt_table, +- .insert_memory = intel_i915_insert_entries, +- .remove_memory = intel_i915_remove_entries, +- .alloc_by_type = intel_i830_alloc_by_type, +- .free_by_type = intel_i810_free_by_type, +- .agp_alloc_page = agp_generic_alloc_page, +- .agp_alloc_pages = agp_generic_alloc_pages, +- .agp_destroy_page = agp_generic_destroy_page, +- .agp_destroy_pages = agp_generic_destroy_pages, +- .agp_type_to_mask_type = intel_i830_type_to_mask_type, +- .chipset_flush = intel_i915_chipset_flush, +-#ifdef USE_PCI_DMA_API +- .agp_map_page = intel_agp_map_page, +- .agp_unmap_page = intel_agp_unmap_page, +- .agp_map_memory = intel_agp_map_memory, +- .agp_unmap_memory = intel_agp_unmap_memory, +-#endif +-}; +- +-static const struct agp_bridge_driver intel_i965_driver = { +- .owner = THIS_MODULE, +- .aperture_sizes = intel_i830_sizes, +- .size_type = FIXED_APER_SIZE, +- .num_aperture_sizes = 4, +- .needs_scratch_page = true, +- .configure = intel_i915_configure, +- .fetch_size = intel_i9xx_fetch_size, +- .cleanup = intel_i915_cleanup, +- .tlb_flush = 
intel_i810_tlbflush, +- .mask_memory = intel_i965_mask_memory, +- .masks = intel_i810_masks, +- .agp_enable = intel_i810_agp_enable, +- .cache_flush = global_cache_flush, +- .create_gatt_table = intel_i965_create_gatt_table, +- .free_gatt_table = intel_i830_free_gatt_table, +- .insert_memory = intel_i915_insert_entries, +- .remove_memory = intel_i915_remove_entries, +- .alloc_by_type = intel_i830_alloc_by_type, +- .free_by_type = intel_i810_free_by_type, +- .agp_alloc_page = agp_generic_alloc_page, +- .agp_alloc_pages = agp_generic_alloc_pages, +- .agp_destroy_page = agp_generic_destroy_page, +- .agp_destroy_pages = agp_generic_destroy_pages, +- .agp_type_to_mask_type = intel_i830_type_to_mask_type, +- .chipset_flush = intel_i915_chipset_flush, +-#ifdef USE_PCI_DMA_API +- .agp_map_page = intel_agp_map_page, +- .agp_unmap_page = intel_agp_unmap_page, +- .agp_map_memory = intel_agp_map_memory, +- .agp_unmap_memory = intel_agp_unmap_memory, +-#endif +-}; +- + static const struct agp_bridge_driver intel_7505_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_8xx_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = intel_7505_configure, + .fetch_size = intel_8xx_fetch_size, + .cleanup = intel_8xx_cleanup, +@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = { + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + }; + +-static const struct agp_bridge_driver intel_g33_driver = { +- .owner = THIS_MODULE, +- .aperture_sizes = intel_i830_sizes, +- .size_type = FIXED_APER_SIZE, +- .num_aperture_sizes = 4, +- .needs_scratch_page = true, +- .configure = intel_i915_configure, +- .fetch_size = intel_i9xx_fetch_size, +- .cleanup = intel_i915_cleanup, +- .tlb_flush = intel_i810_tlbflush, +- .mask_memory = intel_i965_mask_memory, +- .masks = intel_i810_masks, +- .agp_enable = intel_i810_agp_enable, +- .cache_flush = global_cache_flush, +- .create_gatt_table = 
intel_i915_create_gatt_table, +- .free_gatt_table = intel_i830_free_gatt_table, +- .insert_memory = intel_i915_insert_entries, +- .remove_memory = intel_i915_remove_entries, +- .alloc_by_type = intel_i830_alloc_by_type, +- .free_by_type = intel_i810_free_by_type, +- .agp_alloc_page = agp_generic_alloc_page, +- .agp_alloc_pages = agp_generic_alloc_pages, +- .agp_destroy_page = agp_generic_destroy_page, +- .agp_destroy_pages = agp_generic_destroy_pages, +- .agp_type_to_mask_type = intel_i830_type_to_mask_type, +- .chipset_flush = intel_i915_chipset_flush, +-#ifdef USE_PCI_DMA_API +- .agp_map_page = intel_agp_map_page, +- .agp_unmap_page = intel_agp_unmap_page, +- .agp_map_memory = intel_agp_map_memory, +- .agp_unmap_memory = intel_agp_unmap_memory, +-#endif +-}; +- + static int find_gmch(u16 device) + { + struct pci_dev *gmch_device; +@@ -2392,103 +726,137 @@ static int find_gmch(u16 device) + static const struct intel_driver_description { + unsigned int chip_id; + unsigned int gmch_chip_id; +- unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. 
*/ + char *name; + const struct agp_bridge_driver *driver; + const struct agp_bridge_driver *gmch_driver; + } intel_agp_chipsets[] = { +- { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810", ++ { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810", + NULL, &intel_810_driver }, +- { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810", ++ { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810", + NULL, &intel_810_driver }, +- { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810", ++ { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810", + NULL, &intel_810_driver }, +- { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815", ++ { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815", + &intel_815_driver, &intel_810_driver }, +- { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M", ++ { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M", + &intel_830mp_driver, &intel_830_driver }, +- { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, 
NULL }, +- { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", ++ { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M", + &intel_845_driver, &intel_830_driver }, +- { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854", ++ { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854", + &intel_845_driver, &intel_830_driver }, +- { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", ++ { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", + &intel_845_driver, &intel_830_driver }, +- { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865", ++ { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865", + &intel_845_driver, &intel_830_driver }, +- { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)", ++ { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", + NULL, &intel_915_driver }, +- { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G", ++ { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G", + NULL, &intel_915_driver }, +- { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM", ++ { 
PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", + NULL, &intel_915_driver }, +- { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", ++ { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G", + NULL, &intel_915_driver }, +- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM", ++ { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", + NULL, &intel_915_driver }, +- { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", ++ { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", + NULL, &intel_915_driver }, +- { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", ++ { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", + NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35", ++ { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35", + NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", ++ { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", + NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", ++ { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G", + NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM", ++ { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", + NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", ++ { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", + NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, +- { PCI_DEVICE_ID_INTEL_G33_HB, 
PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33", ++ { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL }, ++ { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33", + NULL, &intel_g33_driver }, +- { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35", ++ { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", + NULL, &intel_g33_driver }, +- { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", ++ { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", + NULL, &intel_g33_driver }, +- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150", ++ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", + NULL, &intel_g33_driver }, +- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150", ++ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", + NULL, &intel_g33_driver }, +- { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, ++ { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, + "GM45", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0, ++ { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, + "Eaglelake", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, ++ { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, + "Q45/Q43", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, ++ { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, + "G45/G43", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, ++ { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, + "B43", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, ++ { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, + 
"G41", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, ++ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, + "HD Graphics", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, ++ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, + "HD Graphics", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, ++ { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, + "HD Graphics", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, ++ { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, + "HD Graphics", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0, ++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, + "Sandybridge", NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0, ++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, + "Sandybridge", NULL, &intel_i965_driver }, +- { 0, 0, 0, NULL, NULL, NULL } ++ { 0, 0, NULL, NULL, NULL } + }; + ++static int __devinit intel_gmch_probe(struct pci_dev *pdev, ++ struct agp_bridge_data *bridge) ++{ ++ int i; ++ bridge->driver = NULL; ++ ++ for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { ++ if ((intel_agp_chipsets[i].gmch_chip_id != 0) && ++ find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { ++ bridge->driver = ++ intel_agp_chipsets[i].gmch_driver; ++ break; ++ } ++ } ++ ++ if (!bridge->driver) ++ return 0; ++ ++ bridge->dev_private_data = &intel_private; ++ bridge->dev = pdev; ++ ++ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); ++ ++ if (bridge->driver->mask_memory == intel_i965_mask_memory) { ++ if (pci_set_dma_mask(intel_private.pcidev, 
DMA_BIT_MASK(36))) ++ dev_err(&intel_private.pcidev->dev, ++ "set gfx device dma mask 36bit failed!\n"); ++ else ++ pci_set_consistent_dma_mask(intel_private.pcidev, ++ DMA_BIT_MASK(36)); ++ } ++ ++ return 1; ++} ++ + static int __devinit agp_intel_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) + { +@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, + if (!bridge) + return -ENOMEM; + ++ bridge->capndx = cap_ptr; ++ ++ if (intel_gmch_probe(pdev, bridge)) ++ goto found_gmch; ++ + for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { + /* In case that multiple models of gfx chip may + stand on same host bridge type, this can be + sure we detect the right IGD. */ + if (pdev->device == intel_agp_chipsets[i].chip_id) { +- if ((intel_agp_chipsets[i].gmch_chip_id != 0) && +- find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { +- bridge->driver = +- intel_agp_chipsets[i].gmch_driver; +- break; +- } else if (intel_agp_chipsets[i].multi_gmch_chip) { +- continue; +- } else { +- bridge->driver = intel_agp_chipsets[i].driver; +- break; +- } ++ bridge->driver = intel_agp_chipsets[i].driver; ++ break; + } + } + +@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, + return -ENODEV; + } + +- if (bridge->driver == NULL) { +- /* bridge has no AGP and no IGD detected */ ++ if (!bridge->driver) { + if (cap_ptr) + dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", +- intel_agp_chipsets[i].gmch_chip_id); ++ intel_agp_chipsets[i].gmch_chip_id); + agp_put_bridge(bridge); + return -ENODEV; + } + + bridge->dev = pdev; +- bridge->capndx = cap_ptr; +- bridge->dev_private_data = &intel_private; ++ bridge->dev_private_data = NULL; + + dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); + +@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, + &bridge->mode); + } + +- if (bridge->driver->mask_memory == intel_i965_mask_memory) { +- if 
(pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) +- dev_err(&intel_private.pcidev->dev, +- "set gfx device dma mask 36bit failed!\n"); +- else +- pci_set_consistent_dma_mask(intel_private.pcidev, +- DMA_BIT_MASK(36)); +- } +- ++found_gmch: + pci_set_drvdata(pdev, bridge); + err = agp_add_bridge(bridge); + if (!err) +@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev) + struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + int ret_val; + +- if (bridge->driver == &intel_generic_driver) +- intel_configure(); +- else if (bridge->driver == &intel_850_driver) +- intel_850_configure(); +- else if (bridge->driver == &intel_845_driver) +- intel_845_configure(); +- else if (bridge->driver == &intel_830mp_driver) +- intel_830mp_configure(); +- else if (bridge->driver == &intel_915_driver) +- intel_i915_configure(); +- else if (bridge->driver == &intel_830_driver) +- intel_i830_configure(); +- else if (bridge->driver == &intel_810_driver) +- intel_i810_configure(); +- else if (bridge->driver == &intel_i965_driver) +- intel_i915_configure(); ++ bridge->driver->configure(); + + ret_val = agp_rebind_memory(); + if (ret_val != 0) +diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h +new file mode 100644 +index 0000000..2547465 +--- /dev/null ++++ b/drivers/char/agp/intel-agp.h +@@ -0,0 +1,239 @@ ++/* ++ * Common Intel AGPGART and GTT definitions. 
++ */ ++ ++/* Intel registers */ ++#define INTEL_APSIZE 0xb4 ++#define INTEL_ATTBASE 0xb8 ++#define INTEL_AGPCTRL 0xb0 ++#define INTEL_NBXCFG 0x50 ++#define INTEL_ERRSTS 0x91 ++ ++/* Intel i830 registers */ ++#define I830_GMCH_CTRL 0x52 ++#define I830_GMCH_ENABLED 0x4 ++#define I830_GMCH_MEM_MASK 0x1 ++#define I830_GMCH_MEM_64M 0x1 ++#define I830_GMCH_MEM_128M 0 ++#define I830_GMCH_GMS_MASK 0x70 ++#define I830_GMCH_GMS_DISABLED 0x00 ++#define I830_GMCH_GMS_LOCAL 0x10 ++#define I830_GMCH_GMS_STOLEN_512 0x20 ++#define I830_GMCH_GMS_STOLEN_1024 0x30 ++#define I830_GMCH_GMS_STOLEN_8192 0x40 ++#define I830_RDRAM_CHANNEL_TYPE 0x03010 ++#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) ++#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) ++ ++/* This one is for I830MP w. an external graphic card */ ++#define INTEL_I830_ERRSTS 0x92 ++ ++/* Intel 855GM/852GM registers */ ++#define I855_GMCH_GMS_MASK 0xF0 ++#define I855_GMCH_GMS_STOLEN_0M 0x0 ++#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) ++#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) ++#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) ++#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) ++#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) ++#define I85X_CAPID 0x44 ++#define I85X_VARIANT_MASK 0x7 ++#define I85X_VARIANT_SHIFT 5 ++#define I855_GME 0x0 ++#define I855_GM 0x4 ++#define I852_GME 0x2 ++#define I852_GM 0x5 ++ ++/* Intel i845 registers */ ++#define INTEL_I845_AGPM 0x51 ++#define INTEL_I845_ERRSTS 0xc8 ++ ++/* Intel i860 registers */ ++#define INTEL_I860_MCHCFG 0x50 ++#define INTEL_I860_ERRSTS 0xc8 ++ ++/* Intel i810 registers */ ++#define I810_GMADDR 0x10 ++#define I810_MMADDR 0x14 ++#define I810_PTE_BASE 0x10000 ++#define I810_PTE_MAIN_UNCACHED 0x00000000 ++#define I810_PTE_LOCAL 0x00000002 ++#define I810_PTE_VALID 0x00000001 ++#define I830_PTE_SYSTEM_CACHED 0x00000006 ++#define I810_SMRAM_MISCC 0x70 ++#define I810_GFX_MEM_WIN_SIZE 0x00010000 ++#define I810_GFX_MEM_WIN_32M 0x00010000 ++#define I810_GMS 0x000000c0 ++#define I810_GMS_DISABLE 0x00000000 
++#define I810_PGETBL_CTL 0x2020 ++#define I810_PGETBL_ENABLED 0x00000001 ++#define I965_PGETBL_SIZE_MASK 0x0000000e ++#define I965_PGETBL_SIZE_512KB (0 << 1) ++#define I965_PGETBL_SIZE_256KB (1 << 1) ++#define I965_PGETBL_SIZE_128KB (2 << 1) ++#define I965_PGETBL_SIZE_1MB (3 << 1) ++#define I965_PGETBL_SIZE_2MB (4 << 1) ++#define I965_PGETBL_SIZE_1_5MB (5 << 1) ++#define G33_PGETBL_SIZE_MASK (3 << 8) ++#define G33_PGETBL_SIZE_1M (1 << 8) ++#define G33_PGETBL_SIZE_2M (2 << 8) ++ ++#define I810_DRAM_CTL 0x3000 ++#define I810_DRAM_ROW_0 0x00000001 ++#define I810_DRAM_ROW_0_SDRAM 0x00000001 ++ ++/* Intel 815 register */ ++#define INTEL_815_APCONT 0x51 ++#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF ++ ++/* Intel i820 registers */ ++#define INTEL_I820_RDCR 0x51 ++#define INTEL_I820_ERRSTS 0xc8 ++ ++/* Intel i840 registers */ ++#define INTEL_I840_MCHCFG 0x50 ++#define INTEL_I840_ERRSTS 0xc8 ++ ++/* Intel i850 registers */ ++#define INTEL_I850_MCHCFG 0x50 ++#define INTEL_I850_ERRSTS 0xc8 ++ ++/* intel 915G registers */ ++#define I915_GMADDR 0x18 ++#define I915_MMADDR 0x10 ++#define I915_PTEADDR 0x1C ++#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) ++#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) ++#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) ++#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) ++#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) ++#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) ++#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) ++#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) ++ ++#define I915_IFPADDR 0x60 ++ ++/* Intel 965G registers */ ++#define I965_MSAC 0x62 ++#define I965_IFPADDR 0x70 ++ ++/* Intel 7505 registers */ ++#define INTEL_I7505_APSIZE 0x74 ++#define INTEL_I7505_NCAPID 0x60 ++#define INTEL_I7505_NISTAT 0x6c ++#define INTEL_I7505_ATTBASE 0x78 ++#define INTEL_I7505_ERRSTS 0x42 ++#define INTEL_I7505_AGPCTRL 0x70 ++#define INTEL_I7505_MCHCFG 0x50 ++ ++#define SNB_GMCH_CTRL 0x50 ++#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 ++#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) ++#define 
SNB_GMCH_GMS_STOLEN_64M (2 << 3) ++#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) ++#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) ++#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) ++#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) ++#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) ++#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) ++#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) ++#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) ++#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) ++#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) ++#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) ++#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) ++#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) ++#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) ++#define SNB_GTT_SIZE_0M (0 << 8) ++#define SNB_GTT_SIZE_1M (1 << 8) ++#define SNB_GTT_SIZE_2M (2 << 8) ++#define SNB_GTT_SIZE_MASK (3 << 8) ++ ++/* pci devices ids */ ++#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 ++#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a ++#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 ++#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 ++#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 ++#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 ++#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 ++#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 ++#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 ++#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 ++#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 ++#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 ++#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 ++#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 ++#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC ++#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE ++#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 ++#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 ++#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 ++#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 ++#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 ++#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 ++#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 ++#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 ++#define 
PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 ++#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 ++#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 ++#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 ++#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 ++#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 ++#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 ++#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 ++#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 ++#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 ++#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 ++#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 ++#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 ++#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 ++#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 ++#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 ++#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 ++#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 ++#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a ++#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 ++#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 ++#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 ++#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 ++#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 ++ ++/* cover 915 and 945 variants */ ++#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) ++ ++#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) ++ 
++#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) ++ ++#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) ++ ++#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) ++ ++#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ ++ IS_SNB) +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c +new file mode 100644 +index 0000000..9344216 +--- /dev/null ++++ b/drivers/char/agp/intel-gtt.c +@@ -0,0 +1,1548 @@ ++/* ++ * Intel GTT (Graphics Translation Table) routines ++ * ++ * Caveat: This driver implements the linux agp interface, but this is far from ++ * a agp driver! GTT support ended up here for purely historical reasons: The ++ * old userspace intel graphics drivers needed an interface to map memory into ++ * the GTT. And the drm provides a default interface for graphic devices sitting ++ * on an agp port. 
So it made sense to fake the GTT support as an agp port to ++ * avoid having to create a new api. ++ * ++ * With gem this does not make much sense anymore, just needlessly complicates ++ * the code. But as long as the old graphics stack is still support, it's stuck ++ * here. ++ * ++ * /fairy-tale-mode off ++ */ ++ ++/* ++ * If we have Intel graphics, we're not going to have anything other than ++ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent ++ * on the Intel IOMMU support (CONFIG_DMAR). ++ * Only newer chipsets need to bother with this, of course. ++ */ ++#ifdef CONFIG_DMAR ++#define USE_PCI_DMA_API 1 ++#endif ++ ++static const struct aper_size_info_fixed intel_i810_sizes[] = ++{ ++ {64, 16384, 4}, ++ /* The 32M mode still requires a 64k gatt */ ++ {32, 8192, 4} ++}; ++ ++#define AGP_DCACHE_MEMORY 1 ++#define AGP_PHYS_MEMORY 2 ++#define INTEL_AGP_CACHED_MEMORY 3 ++ ++static struct gatt_mask intel_i810_masks[] = ++{ ++ {.mask = I810_PTE_VALID, .type = 0}, ++ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, ++ {.mask = I810_PTE_VALID, .type = 0}, ++ {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, ++ .type = INTEL_AGP_CACHED_MEMORY} ++}; ++ ++static struct _intel_private { ++ struct pci_dev *pcidev; /* device one */ ++ u8 __iomem *registers; ++ u32 __iomem *gtt; /* I915G */ ++ int num_dcache_entries; ++ /* gtt_entries is the number of gtt entries that are already mapped ++ * to stolen memory. Stolen memory is larger than the memory mapped ++ * through gtt_entries, as it includes some reserved space for the BIOS ++ * popup and for the GTT. 
++ */ ++ int gtt_entries; /* i830+ */ ++ int gtt_total_size; ++ union { ++ void __iomem *i9xx_flush_page; ++ void *i8xx_flush_page; ++ }; ++ struct page *i8xx_page; ++ struct resource ifp_resource; ++ int resource_valid; ++} intel_private; ++ ++#ifdef USE_PCI_DMA_API ++static int intel_agp_map_page(struct page *page, dma_addr_t *ret) ++{ ++ *ret = pci_map_page(intel_private.pcidev, page, 0, ++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ++ if (pci_dma_mapping_error(intel_private.pcidev, *ret)) ++ return -EINVAL; ++ return 0; ++} ++ ++static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) ++{ ++ pci_unmap_page(intel_private.pcidev, dma, ++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ++} ++ ++static void intel_agp_free_sglist(struct agp_memory *mem) ++{ ++ struct sg_table st; ++ ++ st.sgl = mem->sg_list; ++ st.orig_nents = st.nents = mem->page_count; ++ ++ sg_free_table(&st); ++ ++ mem->sg_list = NULL; ++ mem->num_sg = 0; ++} ++ ++static int intel_agp_map_memory(struct agp_memory *mem) ++{ ++ struct sg_table st; ++ struct scatterlist *sg; ++ int i; ++ ++ DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); ++ ++ if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ mem->sg_list = sg = st.sgl; ++ ++ for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) ++ sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); ++ ++ mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, ++ mem->page_count, PCI_DMA_BIDIRECTIONAL); ++ if (unlikely(!mem->num_sg)) { ++ intel_agp_free_sglist(mem); ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++static void intel_agp_unmap_memory(struct agp_memory *mem) ++{ ++ DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); ++ ++ pci_unmap_sg(intel_private.pcidev, mem->sg_list, ++ mem->page_count, PCI_DMA_BIDIRECTIONAL); ++ intel_agp_free_sglist(mem); ++} ++ ++static void intel_agp_insert_sg_entries(struct agp_memory *mem, ++ off_t pg_start, int mask_type) ++{ ++ struct scatterlist *sg; ++ int i, j; ++ ++ j = 
pg_start; ++ ++ WARN_ON(!mem->num_sg); ++ ++ if (mem->num_sg == mem->page_count) { ++ for_each_sg(mem->sg_list, sg, mem->page_count, i) { ++ writel(agp_bridge->driver->mask_memory(agp_bridge, ++ sg_dma_address(sg), mask_type), ++ intel_private.gtt+j); ++ j++; ++ } ++ } else { ++ /* sg may merge pages, but we have to separate ++ * per-page addr for GTT */ ++ unsigned int len, m; ++ ++ for_each_sg(mem->sg_list, sg, mem->num_sg, i) { ++ len = sg_dma_len(sg) / PAGE_SIZE; ++ for (m = 0; m < len; m++) { ++ writel(agp_bridge->driver->mask_memory(agp_bridge, ++ sg_dma_address(sg) + m * PAGE_SIZE, ++ mask_type), ++ intel_private.gtt+j); ++ j++; ++ } ++ } ++ } ++ readl(intel_private.gtt+j-1); ++} ++ ++#else ++ ++static void intel_agp_insert_sg_entries(struct agp_memory *mem, ++ off_t pg_start, int mask_type) ++{ ++ int i, j; ++ u32 cache_bits = 0; ++ ++ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) ++ { ++ cache_bits = I830_PTE_SYSTEM_CACHED; ++ } ++ ++ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { ++ writel(agp_bridge->driver->mask_memory(agp_bridge, ++ page_to_phys(mem->pages[i]), mask_type), ++ intel_private.gtt+j); ++ } ++ ++ readl(intel_private.gtt+j-1); ++} ++ ++#endif ++ ++static int intel_i810_fetch_size(void) ++{ ++ u32 smram_miscc; ++ struct aper_size_info_fixed *values; ++ ++ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); ++ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); ++ ++ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { ++ dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); ++ return 0; ++ } ++ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { ++ agp_bridge->current_size = (void *) (values + 1); ++ agp_bridge->aperture_size_idx = 1; ++ return values[1].size; ++ } else { ++ agp_bridge->current_size = (void *) (values); ++ agp_bridge->aperture_size_idx = 0; ++ return values[0].size; ++ } ++ ++ return 
0; ++} ++ ++static int intel_i810_configure(void) ++{ ++ struct aper_size_info_fixed *current_size; ++ u32 temp; ++ int i; ++ ++ current_size = A_SIZE_FIX(agp_bridge->current_size); ++ ++ if (!intel_private.registers) { ++ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); ++ temp &= 0xfff80000; ++ ++ intel_private.registers = ioremap(temp, 128 * 4096); ++ if (!intel_private.registers) { ++ dev_err(&intel_private.pcidev->dev, ++ "can't remap memory\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ if ((readl(intel_private.registers+I810_DRAM_CTL) ++ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { ++ /* This will need to be dynamically assigned */ ++ dev_info(&intel_private.pcidev->dev, ++ "detected 4MB dedicated video ram\n"); ++ intel_private.num_dcache_entries = 1024; ++ } ++ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); ++ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); ++ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); ++ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ ++ ++ if (agp_bridge->driver->needs_scratch_page) { ++ for (i = 0; i < current_size->num_entries; i++) { ++ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); ++ } ++ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */ ++ } ++ global_cache_flush(); ++ return 0; ++} ++ ++static void intel_i810_cleanup(void) ++{ ++ writel(0, intel_private.registers+I810_PGETBL_CTL); ++ readl(intel_private.registers); /* PCI Posting. 
*/ ++ iounmap(intel_private.registers); ++} ++ ++static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) ++{ ++ return; ++} ++ ++/* Exists to support ARGB cursors */ ++static struct page *i8xx_alloc_pages(void) ++{ ++ struct page *page; ++ ++ page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); ++ if (page == NULL) ++ return NULL; ++ ++ if (set_pages_uc(page, 4) < 0) { ++ set_pages_wb(page, 4); ++ __free_pages(page, 2); ++ return NULL; ++ } ++ get_page(page); ++ atomic_inc(&agp_bridge->current_memory_agp); ++ return page; ++} ++ ++static void i8xx_destroy_pages(struct page *page) ++{ ++ if (page == NULL) ++ return; ++ ++ set_pages_wb(page, 4); ++ put_page(page); ++ __free_pages(page, 2); ++ atomic_dec(&agp_bridge->current_memory_agp); ++} ++ ++static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, ++ int type) ++{ ++ if (type < AGP_USER_TYPES) ++ return type; ++ else if (type == AGP_USER_CACHED_MEMORY) ++ return INTEL_AGP_CACHED_MEMORY; ++ else ++ return 0; ++} ++ ++static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, ++ int type) ++{ ++ int i, j, num_entries; ++ void *temp; ++ int ret = -EINVAL; ++ int mask_type; ++ ++ if (mem->page_count == 0) ++ goto out; ++ ++ temp = agp_bridge->current_size; ++ num_entries = A_SIZE_FIX(temp)->num_entries; ++ ++ if ((pg_start + mem->page_count) > num_entries) ++ goto out_err; ++ ++ ++ for (j = pg_start; j < (pg_start + mem->page_count); j++) { ++ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { ++ ret = -EBUSY; ++ goto out_err; ++ } ++ } ++ ++ if (type != mem->type) ++ goto out_err; ++ ++ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); ++ ++ switch (mask_type) { ++ case AGP_DCACHE_MEMORY: ++ if (!mem->is_flushed) ++ global_cache_flush(); ++ for (i = pg_start; i < (pg_start + mem->page_count); i++) { ++ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, ++ intel_private.registers+I810_PTE_BASE+(i*4)); ++ } ++ 
readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); ++ break; ++ case AGP_PHYS_MEMORY: ++ case AGP_NORMAL_MEMORY: ++ if (!mem->is_flushed) ++ global_cache_flush(); ++ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { ++ writel(agp_bridge->driver->mask_memory(agp_bridge, ++ page_to_phys(mem->pages[i]), mask_type), ++ intel_private.registers+I810_PTE_BASE+(j*4)); ++ } ++ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); ++ break; ++ default: ++ goto out_err; ++ } ++ ++out: ++ ret = 0; ++out_err: ++ mem->is_flushed = true; ++ return ret; ++} ++ ++static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, ++ int type) ++{ ++ int i; ++ ++ if (mem->page_count == 0) ++ return 0; ++ ++ for (i = pg_start; i < (mem->page_count + pg_start); i++) { ++ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); ++ } ++ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); ++ ++ return 0; ++} ++ ++/* ++ * The i810/i830 requires a physical address to program its mouse ++ * pointer into hardware. ++ * However the Xserver still writes to it through the agp aperture. 
++ */ ++static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) ++{ ++ struct agp_memory *new; ++ struct page *page; ++ ++ switch (pg_count) { ++ case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); ++ break; ++ case 4: ++ /* kludge to get 4 physical pages for ARGB cursor */ ++ page = i8xx_alloc_pages(); ++ break; ++ default: ++ return NULL; ++ } ++ ++ if (page == NULL) ++ return NULL; ++ ++ new = agp_create_memory(pg_count); ++ if (new == NULL) ++ return NULL; ++ ++ new->pages[0] = page; ++ if (pg_count == 4) { ++ /* kludge to get 4 physical pages for ARGB cursor */ ++ new->pages[1] = new->pages[0] + 1; ++ new->pages[2] = new->pages[1] + 1; ++ new->pages[3] = new->pages[2] + 1; ++ } ++ new->page_count = pg_count; ++ new->num_scratch_pages = pg_count; ++ new->type = AGP_PHYS_MEMORY; ++ new->physical = page_to_phys(new->pages[0]); ++ return new; ++} ++ ++static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) ++{ ++ struct agp_memory *new; ++ ++ if (type == AGP_DCACHE_MEMORY) { ++ if (pg_count != intel_private.num_dcache_entries) ++ return NULL; ++ ++ new = agp_create_memory(1); ++ if (new == NULL) ++ return NULL; ++ ++ new->type = AGP_DCACHE_MEMORY; ++ new->page_count = pg_count; ++ new->num_scratch_pages = 0; ++ agp_free_page_array(new); ++ return new; ++ } ++ if (type == AGP_PHYS_MEMORY) ++ return alloc_agpphysmem_i8xx(pg_count, type); ++ return NULL; ++} ++ ++static void intel_i810_free_by_type(struct agp_memory *curr) ++{ ++ agp_free_key(curr->key); ++ if (curr->type == AGP_PHYS_MEMORY) { ++ if (curr->page_count == 4) ++ i8xx_destroy_pages(curr->pages[0]); ++ else { ++ agp_bridge->driver->agp_destroy_page(curr->pages[0], ++ AGP_PAGE_DESTROY_UNMAP); ++ agp_bridge->driver->agp_destroy_page(curr->pages[0], ++ AGP_PAGE_DESTROY_FREE); ++ } ++ agp_free_page_array(curr); ++ } ++ kfree(curr); ++} ++ ++static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, ++ dma_addr_t addr, int type) ++{ ++ /* 
Type checking must be done elsewhere */ ++ return addr | bridge->driver->masks[type].mask; ++} ++ ++static struct aper_size_info_fixed intel_i830_sizes[] = ++{ ++ {128, 32768, 5}, ++ /* The 64M mode still requires a 128k gatt */ ++ {64, 16384, 5}, ++ {256, 65536, 6}, ++ {512, 131072, 7}, ++}; ++ ++static void intel_i830_init_gtt_entries(void) ++{ ++ u16 gmch_ctrl; ++ int gtt_entries = 0; ++ u8 rdct; ++ int local = 0; ++ static const int ddt[4] = { 0, 16, 32, 64 }; ++ int size; /* reserved space (in kb) at the top of stolen memory */ ++ ++ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); ++ ++ if (IS_I965) { ++ u32 pgetbl_ctl; ++ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); ++ ++ /* The 965 has a field telling us the size of the GTT, ++ * which may be larger than what is necessary to map the ++ * aperture. ++ */ ++ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { ++ case I965_PGETBL_SIZE_128KB: ++ size = 128; ++ break; ++ case I965_PGETBL_SIZE_256KB: ++ size = 256; ++ break; ++ case I965_PGETBL_SIZE_512KB: ++ size = 512; ++ break; ++ case I965_PGETBL_SIZE_1MB: ++ size = 1024; ++ break; ++ case I965_PGETBL_SIZE_2MB: ++ size = 2048; ++ break; ++ case I965_PGETBL_SIZE_1_5MB: ++ size = 1024 + 512; ++ break; ++ default: ++ dev_info(&intel_private.pcidev->dev, ++ "unknown page table size, assuming 512KB\n"); ++ size = 512; ++ } ++ size += 4; /* add in BIOS popup space */ ++ } else if (IS_G33 && !IS_PINEVIEW) { ++ /* G33's GTT size defined in gmch_ctrl */ ++ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { ++ case G33_PGETBL_SIZE_1M: ++ size = 1024; ++ break; ++ case G33_PGETBL_SIZE_2M: ++ size = 2048; ++ break; ++ default: ++ dev_info(&agp_bridge->dev->dev, ++ "unknown page table size 0x%x, assuming 512KB\n", ++ (gmch_ctrl & G33_PGETBL_SIZE_MASK)); ++ size = 512; ++ } ++ size += 4; ++ } else if (IS_G4X || IS_PINEVIEW) { ++ /* On 4 series hardware, GTT stolen is separate from graphics ++ * stolen, ignore it in stolen gtt entries counting. 
However, ++ * 4KB of the stolen memory doesn't get mapped to the GTT. ++ */ ++ size = 4; ++ } else { ++ /* On previous hardware, the GTT size was just what was ++ * required to map the aperture. ++ */ ++ size = agp_bridge->driver->fetch_size() + 4; ++ } ++ ++ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { ++ switch (gmch_ctrl & I830_GMCH_GMS_MASK) { ++ case I830_GMCH_GMS_STOLEN_512: ++ gtt_entries = KB(512) - KB(size); ++ break; ++ case I830_GMCH_GMS_STOLEN_1024: ++ gtt_entries = MB(1) - KB(size); ++ break; ++ case I830_GMCH_GMS_STOLEN_8192: ++ gtt_entries = MB(8) - KB(size); ++ break; ++ case I830_GMCH_GMS_LOCAL: ++ rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); ++ gtt_entries = (I830_RDRAM_ND(rdct) + 1) * ++ MB(ddt[I830_RDRAM_DDT(rdct)]); ++ local = 1; ++ break; ++ default: ++ gtt_entries = 0; ++ break; ++ } ++ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { ++ /* ++ * SandyBridge has new memory control reg at 0x50.w ++ */ ++ u16 snb_gmch_ctl; ++ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); ++ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { ++ case SNB_GMCH_GMS_STOLEN_32M: ++ gtt_entries = MB(32) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_64M: ++ gtt_entries = MB(64) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_96M: ++ gtt_entries = MB(96) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_128M: ++ gtt_entries = MB(128) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_160M: ++ gtt_entries = MB(160) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_192M: ++ gtt_entries = MB(192) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_224M: ++ gtt_entries = MB(224) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_256M: ++ gtt_entries = MB(256) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_288M: ++ gtt_entries = MB(288) - KB(size); ++ 
break; ++ case SNB_GMCH_GMS_STOLEN_320M: ++ gtt_entries = MB(320) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_352M: ++ gtt_entries = MB(352) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_384M: ++ gtt_entries = MB(384) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_416M: ++ gtt_entries = MB(416) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_448M: ++ gtt_entries = MB(448) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_480M: ++ gtt_entries = MB(480) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_512M: ++ gtt_entries = MB(512) - KB(size); ++ break; ++ } ++ } else { ++ switch (gmch_ctrl & I855_GMCH_GMS_MASK) { ++ case I855_GMCH_GMS_STOLEN_1M: ++ gtt_entries = MB(1) - KB(size); ++ break; ++ case I855_GMCH_GMS_STOLEN_4M: ++ gtt_entries = MB(4) - KB(size); ++ break; ++ case I855_GMCH_GMS_STOLEN_8M: ++ gtt_entries = MB(8) - KB(size); ++ break; ++ case I855_GMCH_GMS_STOLEN_16M: ++ gtt_entries = MB(16) - KB(size); ++ break; ++ case I855_GMCH_GMS_STOLEN_32M: ++ gtt_entries = MB(32) - KB(size); ++ break; ++ case I915_GMCH_GMS_STOLEN_48M: ++ /* Check it's really I915G */ ++ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) ++ gtt_entries = MB(48) - KB(size); ++ else ++ gtt_entries = 0; ++ break; ++ case I915_GMCH_GMS_STOLEN_64M: ++ /* Check it's really I915G */ ++ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) ++ gtt_entries = MB(64) - KB(size); ++ else ++ gtt_entries = 0; ++ break; ++ case G33_GMCH_GMS_STOLEN_128M: ++ if (IS_G33 || IS_I965 || IS_G4X) ++ gtt_entries = MB(128) - KB(size); ++ else ++ gtt_entries = 0; ++ break; ++ case G33_GMCH_GMS_STOLEN_256M: ++ if (IS_G33 || IS_I965 || IS_G4X) ++ gtt_entries = MB(256) - KB(size); ++ else ++ gtt_entries = 0; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_96M: ++ if (IS_I965 || IS_G4X) ++ gtt_entries = MB(96) - KB(size); ++ else ++ gtt_entries = 0; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_160M: ++ if (IS_I965 || IS_G4X) ++ gtt_entries = MB(160) - KB(size); ++ else ++ gtt_entries = 0; ++ break; ++ case 
INTEL_GMCH_GMS_STOLEN_224M: ++ if (IS_I965 || IS_G4X) ++ gtt_entries = MB(224) - KB(size); ++ else ++ gtt_entries = 0; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_352M: ++ if (IS_I965 || IS_G4X) ++ gtt_entries = MB(352) - KB(size); ++ else ++ gtt_entries = 0; ++ break; ++ default: ++ gtt_entries = 0; ++ break; ++ } ++ } ++ if (gtt_entries > 0) { ++ dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", ++ gtt_entries / KB(1), local ? "local" : "stolen"); ++ gtt_entries /= KB(4); ++ } else { ++ dev_info(&agp_bridge->dev->dev, ++ "no pre-allocated video memory detected\n"); ++ gtt_entries = 0; ++ } ++ ++ intel_private.gtt_entries = gtt_entries; ++} ++ ++static void intel_i830_fini_flush(void) ++{ ++ kunmap(intel_private.i8xx_page); ++ intel_private.i8xx_flush_page = NULL; ++ unmap_page_from_agp(intel_private.i8xx_page); ++ ++ __free_page(intel_private.i8xx_page); ++ intel_private.i8xx_page = NULL; ++} ++ ++static void intel_i830_setup_flush(void) ++{ ++ /* return if we've already set the flush mechanism up */ ++ if (intel_private.i8xx_page) ++ return; ++ ++ intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); ++ if (!intel_private.i8xx_page) ++ return; ++ ++ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); ++ if (!intel_private.i8xx_flush_page) ++ intel_i830_fini_flush(); ++} ++ ++/* The chipset_flush interface needs to get data that has already been ++ * flushed out of the CPU all the way out to main memory, because the GPU ++ * doesn't snoop those buffers. ++ * ++ * The 8xx series doesn't have the same lovely interface for flushing the ++ * chipset write buffers that the later chips do. According to the 865 ++ * specs, it's 64 octwords, or 1KB. So, to get those previous things in ++ * that buffer out, we just fill 1KB and clflush it out, on the assumption ++ * that it'll push whatever was in there out. It appears to work. 
++ */ ++static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) ++{ ++ unsigned int *pg = intel_private.i8xx_flush_page; ++ ++ memset(pg, 0, 1024); ++ ++ if (cpu_has_clflush) ++ clflush_cache_range(pg, 1024); ++ else if (wbinvd_on_all_cpus() != 0) ++ printk(KERN_ERR "Timed out waiting for cache flush.\n"); ++} ++ ++/* The intel i830 automatically initializes the agp aperture during POST. ++ * Use the memory already set aside for in the GTT. ++ */ ++static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) ++{ ++ int page_order; ++ struct aper_size_info_fixed *size; ++ int num_entries; ++ u32 temp; ++ ++ size = agp_bridge->current_size; ++ page_order = size->page_order; ++ num_entries = size->num_entries; ++ agp_bridge->gatt_table_real = NULL; ++ ++ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); ++ temp &= 0xfff80000; ++ ++ intel_private.registers = ioremap(temp, 128 * 4096); ++ if (!intel_private.registers) ++ return -ENOMEM; ++ ++ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; ++ global_cache_flush(); /* FIXME: ?? */ ++ ++ /* we have to call this as early as possible after the MMIO base address is known */ ++ intel_i830_init_gtt_entries(); ++ ++ agp_bridge->gatt_table = NULL; ++ ++ agp_bridge->gatt_bus_addr = temp; ++ ++ return 0; ++} ++ ++/* Return the gatt table to a sane state. Use the top of stolen ++ * memory for the GTT. 
++ */ ++static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) ++{ ++ return 0; ++} ++ ++static int intel_i830_fetch_size(void) ++{ ++ u16 gmch_ctrl; ++ struct aper_size_info_fixed *values; ++ ++ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); ++ ++ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && ++ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { ++ /* 855GM/852GM/865G has 128MB aperture size */ ++ agp_bridge->current_size = (void *) values; ++ agp_bridge->aperture_size_idx = 0; ++ return values[0].size; ++ } ++ ++ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); ++ ++ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { ++ agp_bridge->current_size = (void *) values; ++ agp_bridge->aperture_size_idx = 0; ++ return values[0].size; ++ } else { ++ agp_bridge->current_size = (void *) (values + 1); ++ agp_bridge->aperture_size_idx = 1; ++ return values[1].size; ++ } ++ ++ return 0; ++} ++ ++static int intel_i830_configure(void) ++{ ++ struct aper_size_info_fixed *current_size; ++ u32 temp; ++ u16 gmch_ctrl; ++ int i; ++ ++ current_size = A_SIZE_FIX(agp_bridge->current_size); ++ ++ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); ++ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); ++ ++ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); ++ gmch_ctrl |= I830_GMCH_ENABLED; ++ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); ++ ++ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); ++ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ ++ ++ if (agp_bridge->driver->needs_scratch_page) { ++ for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { ++ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); ++ } ++ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. 
*/ ++ } ++ ++ global_cache_flush(); ++ ++ intel_i830_setup_flush(); ++ return 0; ++} ++ ++static void intel_i830_cleanup(void) ++{ ++ iounmap(intel_private.registers); ++} ++ ++static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, ++ int type) ++{ ++ int i, j, num_entries; ++ void *temp; ++ int ret = -EINVAL; ++ int mask_type; ++ ++ if (mem->page_count == 0) ++ goto out; ++ ++ temp = agp_bridge->current_size; ++ num_entries = A_SIZE_FIX(temp)->num_entries; ++ ++ if (pg_start < intel_private.gtt_entries) { ++ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, ++ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", ++ pg_start, intel_private.gtt_entries); ++ ++ dev_info(&intel_private.pcidev->dev, ++ "trying to insert into local/stolen memory\n"); ++ goto out_err; ++ } ++ ++ if ((pg_start + mem->page_count) > num_entries) ++ goto out_err; ++ ++ /* The i830 can't check the GTT for entries since its read only, ++ * depend on the caller to make the correct offset decisions. 
++ */ ++ ++ if (type != mem->type) ++ goto out_err; ++ ++ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); ++ ++ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && ++ mask_type != INTEL_AGP_CACHED_MEMORY) ++ goto out_err; ++ ++ if (!mem->is_flushed) ++ global_cache_flush(); ++ ++ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { ++ writel(agp_bridge->driver->mask_memory(agp_bridge, ++ page_to_phys(mem->pages[i]), mask_type), ++ intel_private.registers+I810_PTE_BASE+(j*4)); ++ } ++ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); ++ ++out: ++ ret = 0; ++out_err: ++ mem->is_flushed = true; ++ return ret; ++} ++ ++static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, ++ int type) ++{ ++ int i; ++ ++ if (mem->page_count == 0) ++ return 0; ++ ++ if (pg_start < intel_private.gtt_entries) { ++ dev_info(&intel_private.pcidev->dev, ++ "trying to disable local/stolen memory\n"); ++ return -EINVAL; ++ } ++ ++ for (i = pg_start; i < (mem->page_count + pg_start); i++) { ++ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); ++ } ++ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); ++ ++ return 0; ++} ++ ++static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) ++{ ++ if (type == AGP_PHYS_MEMORY) ++ return alloc_agpphysmem_i8xx(pg_count, type); ++ /* always return NULL for other allocation types for now */ ++ return NULL; ++} ++ ++static int intel_alloc_chipset_flush_resource(void) ++{ ++ int ret; ++ ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, ++ PAGE_SIZE, PCIBIOS_MIN_MEM, 0, ++ pcibios_align_resource, agp_bridge->dev); ++ ++ return ret; ++} ++ ++static void intel_i915_setup_chipset_flush(void) ++{ ++ int ret; ++ u32 temp; ++ ++ pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); ++ if (!(temp & 0x1)) { ++ intel_alloc_chipset_flush_resource(); ++ intel_private.resource_valid = 1; ++ 
pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); ++ } else { ++ temp &= ~1; ++ ++ intel_private.resource_valid = 1; ++ intel_private.ifp_resource.start = temp; ++ intel_private.ifp_resource.end = temp + PAGE_SIZE; ++ ret = request_resource(&iomem_resource, &intel_private.ifp_resource); ++ /* some BIOSes reserve this area in a pnp some don't */ ++ if (ret) ++ intel_private.resource_valid = 0; ++ } ++} ++ ++static void intel_i965_g33_setup_chipset_flush(void) ++{ ++ u32 temp_hi, temp_lo; ++ int ret; ++ ++ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); ++ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); ++ ++ if (!(temp_lo & 0x1)) { ++ ++ intel_alloc_chipset_flush_resource(); ++ ++ intel_private.resource_valid = 1; ++ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, ++ upper_32_bits(intel_private.ifp_resource.start)); ++ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); ++ } else { ++ u64 l64; ++ ++ temp_lo &= ~0x1; ++ l64 = ((u64)temp_hi << 32) | temp_lo; ++ ++ intel_private.resource_valid = 1; ++ intel_private.ifp_resource.start = l64; ++ intel_private.ifp_resource.end = l64 + PAGE_SIZE; ++ ret = request_resource(&iomem_resource, &intel_private.ifp_resource); ++ /* some BIOSes reserve this area in a pnp some don't */ ++ if (ret) ++ intel_private.resource_valid = 0; ++ } ++} ++ ++static void intel_i9xx_setup_flush(void) ++{ ++ /* return if already configured */ ++ if (intel_private.ifp_resource.start) ++ return; ++ ++ if (IS_SNB) ++ return; ++ ++ /* setup a resource for this object */ ++ intel_private.ifp_resource.name = "Intel Flush Page"; ++ intel_private.ifp_resource.flags = IORESOURCE_MEM; ++ ++ /* Setup chipset flush for 915 */ ++ if (IS_I965 || IS_G33 || IS_G4X) { ++ intel_i965_g33_setup_chipset_flush(); ++ } else { ++ intel_i915_setup_chipset_flush(); ++ } ++ ++ if 
(intel_private.ifp_resource.start) { ++ intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); ++ if (!intel_private.i9xx_flush_page) ++ dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); ++ } ++} ++ ++static int intel_i9xx_configure(void) ++{ ++ struct aper_size_info_fixed *current_size; ++ u32 temp; ++ u16 gmch_ctrl; ++ int i; ++ ++ current_size = A_SIZE_FIX(agp_bridge->current_size); ++ ++ pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); ++ ++ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); ++ ++ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); ++ gmch_ctrl |= I830_GMCH_ENABLED; ++ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); ++ ++ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); ++ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ ++ ++ if (agp_bridge->driver->needs_scratch_page) { ++ for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { ++ writel(agp_bridge->scratch_page, intel_private.gtt+i); ++ } ++ readl(intel_private.gtt+i-1); /* PCI Posting. 
*/ ++ } ++ ++ global_cache_flush(); ++ ++ intel_i9xx_setup_flush(); ++ ++ return 0; ++} ++ ++static void intel_i915_cleanup(void) ++{ ++ if (intel_private.i9xx_flush_page) ++ iounmap(intel_private.i9xx_flush_page); ++ if (intel_private.resource_valid) ++ release_resource(&intel_private.ifp_resource); ++ intel_private.ifp_resource.start = 0; ++ intel_private.resource_valid = 0; ++ iounmap(intel_private.gtt); ++ iounmap(intel_private.registers); ++} ++ ++static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) ++{ ++ if (intel_private.i9xx_flush_page) ++ writel(1, intel_private.i9xx_flush_page); ++} ++ ++static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, ++ int type) ++{ ++ int num_entries; ++ void *temp; ++ int ret = -EINVAL; ++ int mask_type; ++ ++ if (mem->page_count == 0) ++ goto out; ++ ++ temp = agp_bridge->current_size; ++ num_entries = A_SIZE_FIX(temp)->num_entries; ++ ++ if (pg_start < intel_private.gtt_entries) { ++ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, ++ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", ++ pg_start, intel_private.gtt_entries); ++ ++ dev_info(&intel_private.pcidev->dev, ++ "trying to insert into local/stolen memory\n"); ++ goto out_err; ++ } ++ ++ if ((pg_start + mem->page_count) > num_entries) ++ goto out_err; ++ ++ /* The i915 can't check the GTT for entries since it's read only; ++ * depend on the caller to make the correct offset decisions. 
++ */ ++ ++ if (type != mem->type) ++ goto out_err; ++ ++ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); ++ ++ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && ++ mask_type != INTEL_AGP_CACHED_MEMORY) ++ goto out_err; ++ ++ if (!mem->is_flushed) ++ global_cache_flush(); ++ ++ intel_agp_insert_sg_entries(mem, pg_start, mask_type); ++ ++ out: ++ ret = 0; ++ out_err: ++ mem->is_flushed = true; ++ return ret; ++} ++ ++static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, ++ int type) ++{ ++ int i; ++ ++ if (mem->page_count == 0) ++ return 0; ++ ++ if (pg_start < intel_private.gtt_entries) { ++ dev_info(&intel_private.pcidev->dev, ++ "trying to disable local/stolen memory\n"); ++ return -EINVAL; ++ } ++ ++ for (i = pg_start; i < (mem->page_count + pg_start); i++) ++ writel(agp_bridge->scratch_page, intel_private.gtt+i); ++ ++ readl(intel_private.gtt+i-1); ++ ++ return 0; ++} ++ ++/* Return the aperture size by just checking the resource length. The effect ++ * described in the spec of the MSAC registers is just changing of the ++ * resource size. 
++ */ ++static int intel_i9xx_fetch_size(void) ++{ ++ int num_sizes = ARRAY_SIZE(intel_i830_sizes); ++ int aper_size; /* size in megabytes */ ++ int i; ++ ++ aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); ++ ++ for (i = 0; i < num_sizes; i++) { ++ if (aper_size == intel_i830_sizes[i].size) { ++ agp_bridge->current_size = intel_i830_sizes + i; ++ return aper_size; ++ } ++ } ++ ++ return 0; ++} ++ ++static int intel_i915_get_gtt_size(void) ++{ ++ int size; ++ ++ if (IS_G33) { ++ u16 gmch_ctrl; ++ ++ /* G33's GTT size defined in gmch_ctrl */ ++ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); ++ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { ++ case G33_PGETBL_SIZE_1M: ++ size = 1024; ++ break; ++ case G33_PGETBL_SIZE_2M: ++ size = 2048; ++ break; ++ default: ++ dev_info(&agp_bridge->dev->dev, ++ "unknown page table size 0x%x, assuming 512KB\n", ++ (gmch_ctrl & G33_PGETBL_SIZE_MASK)); ++ size = 512; ++ } ++ } else { ++ /* On previous hardware, the GTT size was just what was ++ * required to map the aperture. ++ */ ++ size = agp_bridge->driver->fetch_size(); ++ } ++ ++ return KB(size); ++} ++ ++/* The intel i915 automatically initializes the agp aperture during POST. ++ * Use the memory already set aside for in the GTT. 
++ */ ++static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) ++{ ++ int page_order; ++ struct aper_size_info_fixed *size; ++ int num_entries; ++ u32 temp, temp2; ++ int gtt_map_size; ++ ++ size = agp_bridge->current_size; ++ page_order = size->page_order; ++ num_entries = size->num_entries; ++ agp_bridge->gatt_table_real = NULL; ++ ++ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); ++ pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); ++ ++ gtt_map_size = intel_i915_get_gtt_size(); ++ ++ intel_private.gtt = ioremap(temp2, gtt_map_size); ++ if (!intel_private.gtt) ++ return -ENOMEM; ++ ++ intel_private.gtt_total_size = gtt_map_size / 4; ++ ++ temp &= 0xfff80000; ++ ++ intel_private.registers = ioremap(temp, 128 * 4096); ++ if (!intel_private.registers) { ++ iounmap(intel_private.gtt); ++ return -ENOMEM; ++ } ++ ++ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; ++ global_cache_flush(); /* FIXME: ? */ ++ ++ /* we have to call this as early as possible after the MMIO base address is known */ ++ intel_i830_init_gtt_entries(); ++ ++ agp_bridge->gatt_table = NULL; ++ ++ agp_bridge->gatt_bus_addr = temp; ++ ++ return 0; ++} ++ ++/* ++ * The i965 supports 36-bit physical addresses, but to keep ++ * the format of the GTT the same, the bits that don't fit ++ * in a 32-bit word are shifted down to bits 4..7. ++ * ++ * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" ++ * is always zero on 32-bit architectures, so no need to make ++ * this conditional. 
++ */ ++static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, ++ dma_addr_t addr, int type) ++{ ++ /* Shift high bits down */ ++ addr |= (addr >> 28) & 0xf0; ++ ++ /* Type checking must be done elsewhere */ ++ return addr | bridge->driver->masks[type].mask; ++} ++ ++static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) ++{ ++ u16 snb_gmch_ctl; ++ ++ switch (agp_bridge->dev->device) { ++ case PCI_DEVICE_ID_INTEL_GM45_HB: ++ case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: ++ case PCI_DEVICE_ID_INTEL_Q45_HB: ++ case PCI_DEVICE_ID_INTEL_G45_HB: ++ case PCI_DEVICE_ID_INTEL_G41_HB: ++ case PCI_DEVICE_ID_INTEL_B43_HB: ++ case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: ++ case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: ++ case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: ++ case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: ++ *gtt_offset = *gtt_size = MB(2); ++ break; ++ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: ++ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: ++ *gtt_offset = MB(2); ++ ++ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); ++ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { ++ default: ++ case SNB_GTT_SIZE_0M: ++ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); ++ *gtt_size = MB(0); ++ break; ++ case SNB_GTT_SIZE_1M: ++ *gtt_size = MB(1); ++ break; ++ case SNB_GTT_SIZE_2M: ++ *gtt_size = MB(2); ++ break; ++ } ++ break; ++ default: ++ *gtt_offset = *gtt_size = KB(512); ++ } ++} ++ ++/* The intel i965 automatically initializes the agp aperture during POST. ++ * Use the memory already set aside for in the GTT. 
++ */ ++static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) ++{ ++ int page_order; ++ struct aper_size_info_fixed *size; ++ int num_entries; ++ u32 temp; ++ int gtt_offset, gtt_size; ++ ++ size = agp_bridge->current_size; ++ page_order = size->page_order; ++ num_entries = size->num_entries; ++ agp_bridge->gatt_table_real = NULL; ++ ++ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); ++ ++ temp &= 0xfff00000; ++ ++ intel_i965_get_gtt_range(&gtt_offset, &gtt_size); ++ ++ intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); ++ ++ if (!intel_private.gtt) ++ return -ENOMEM; ++ ++ intel_private.gtt_total_size = gtt_size / 4; ++ ++ intel_private.registers = ioremap(temp, 128 * 4096); ++ if (!intel_private.registers) { ++ iounmap(intel_private.gtt); ++ return -ENOMEM; ++ } ++ ++ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; ++ global_cache_flush(); /* FIXME: ? */ ++ ++ /* we have to call this as early as possible after the MMIO base address is known */ ++ intel_i830_init_gtt_entries(); ++ ++ agp_bridge->gatt_table = NULL; ++ ++ agp_bridge->gatt_bus_addr = temp; ++ ++ return 0; ++} ++ ++static const struct agp_bridge_driver intel_810_driver = { ++ .owner = THIS_MODULE, ++ .aperture_sizes = intel_i810_sizes, ++ .size_type = FIXED_APER_SIZE, ++ .num_aperture_sizes = 2, ++ .needs_scratch_page = true, ++ .configure = intel_i810_configure, ++ .fetch_size = intel_i810_fetch_size, ++ .cleanup = intel_i810_cleanup, ++ .mask_memory = intel_i810_mask_memory, ++ .masks = intel_i810_masks, ++ .agp_enable = intel_i810_agp_enable, ++ .cache_flush = global_cache_flush, ++ .create_gatt_table = agp_generic_create_gatt_table, ++ .free_gatt_table = agp_generic_free_gatt_table, ++ .insert_memory = intel_i810_insert_entries, ++ .remove_memory = intel_i810_remove_entries, ++ .alloc_by_type = intel_i810_alloc_by_type, ++ .free_by_type = intel_i810_free_by_type, ++ .agp_alloc_page = agp_generic_alloc_page, ++ .agp_alloc_pages = 
agp_generic_alloc_pages, ++ .agp_destroy_page = agp_generic_destroy_page, ++ .agp_destroy_pages = agp_generic_destroy_pages, ++ .agp_type_to_mask_type = agp_generic_type_to_mask_type, ++}; ++ ++static const struct agp_bridge_driver intel_830_driver = { ++ .owner = THIS_MODULE, ++ .aperture_sizes = intel_i830_sizes, ++ .size_type = FIXED_APER_SIZE, ++ .num_aperture_sizes = 4, ++ .needs_scratch_page = true, ++ .configure = intel_i830_configure, ++ .fetch_size = intel_i830_fetch_size, ++ .cleanup = intel_i830_cleanup, ++ .mask_memory = intel_i810_mask_memory, ++ .masks = intel_i810_masks, ++ .agp_enable = intel_i810_agp_enable, ++ .cache_flush = global_cache_flush, ++ .create_gatt_table = intel_i830_create_gatt_table, ++ .free_gatt_table = intel_i830_free_gatt_table, ++ .insert_memory = intel_i830_insert_entries, ++ .remove_memory = intel_i830_remove_entries, ++ .alloc_by_type = intel_i830_alloc_by_type, ++ .free_by_type = intel_i810_free_by_type, ++ .agp_alloc_page = agp_generic_alloc_page, ++ .agp_alloc_pages = agp_generic_alloc_pages, ++ .agp_destroy_page = agp_generic_destroy_page, ++ .agp_destroy_pages = agp_generic_destroy_pages, ++ .agp_type_to_mask_type = intel_i830_type_to_mask_type, ++ .chipset_flush = intel_i830_chipset_flush, ++}; ++ ++static const struct agp_bridge_driver intel_915_driver = { ++ .owner = THIS_MODULE, ++ .aperture_sizes = intel_i830_sizes, ++ .size_type = FIXED_APER_SIZE, ++ .num_aperture_sizes = 4, ++ .needs_scratch_page = true, ++ .configure = intel_i9xx_configure, ++ .fetch_size = intel_i9xx_fetch_size, ++ .cleanup = intel_i915_cleanup, ++ .mask_memory = intel_i810_mask_memory, ++ .masks = intel_i810_masks, ++ .agp_enable = intel_i810_agp_enable, ++ .cache_flush = global_cache_flush, ++ .create_gatt_table = intel_i915_create_gatt_table, ++ .free_gatt_table = intel_i830_free_gatt_table, ++ .insert_memory = intel_i915_insert_entries, ++ .remove_memory = intel_i915_remove_entries, ++ .alloc_by_type = intel_i830_alloc_by_type, ++ 
.free_by_type = intel_i810_free_by_type, ++ .agp_alloc_page = agp_generic_alloc_page, ++ .agp_alloc_pages = agp_generic_alloc_pages, ++ .agp_destroy_page = agp_generic_destroy_page, ++ .agp_destroy_pages = agp_generic_destroy_pages, ++ .agp_type_to_mask_type = intel_i830_type_to_mask_type, ++ .chipset_flush = intel_i915_chipset_flush, ++#ifdef USE_PCI_DMA_API ++ .agp_map_page = intel_agp_map_page, ++ .agp_unmap_page = intel_agp_unmap_page, ++ .agp_map_memory = intel_agp_map_memory, ++ .agp_unmap_memory = intel_agp_unmap_memory, ++#endif ++}; ++ ++static const struct agp_bridge_driver intel_i965_driver = { ++ .owner = THIS_MODULE, ++ .aperture_sizes = intel_i830_sizes, ++ .size_type = FIXED_APER_SIZE, ++ .num_aperture_sizes = 4, ++ .needs_scratch_page = true, ++ .configure = intel_i9xx_configure, ++ .fetch_size = intel_i9xx_fetch_size, ++ .cleanup = intel_i915_cleanup, ++ .mask_memory = intel_i965_mask_memory, ++ .masks = intel_i810_masks, ++ .agp_enable = intel_i810_agp_enable, ++ .cache_flush = global_cache_flush, ++ .create_gatt_table = intel_i965_create_gatt_table, ++ .free_gatt_table = intel_i830_free_gatt_table, ++ .insert_memory = intel_i915_insert_entries, ++ .remove_memory = intel_i915_remove_entries, ++ .alloc_by_type = intel_i830_alloc_by_type, ++ .free_by_type = intel_i810_free_by_type, ++ .agp_alloc_page = agp_generic_alloc_page, ++ .agp_alloc_pages = agp_generic_alloc_pages, ++ .agp_destroy_page = agp_generic_destroy_page, ++ .agp_destroy_pages = agp_generic_destroy_pages, ++ .agp_type_to_mask_type = intel_i830_type_to_mask_type, ++ .chipset_flush = intel_i915_chipset_flush, ++#ifdef USE_PCI_DMA_API ++ .agp_map_page = intel_agp_map_page, ++ .agp_unmap_page = intel_agp_unmap_page, ++ .agp_map_memory = intel_agp_map_memory, ++ .agp_unmap_memory = intel_agp_unmap_memory, ++#endif ++}; ++ ++static const struct agp_bridge_driver intel_g33_driver = { ++ .owner = THIS_MODULE, ++ .aperture_sizes = intel_i830_sizes, ++ .size_type = FIXED_APER_SIZE, ++ 
.num_aperture_sizes = 4, ++ .needs_scratch_page = true, ++ .configure = intel_i9xx_configure, ++ .fetch_size = intel_i9xx_fetch_size, ++ .cleanup = intel_i915_cleanup, ++ .mask_memory = intel_i965_mask_memory, ++ .masks = intel_i810_masks, ++ .agp_enable = intel_i810_agp_enable, ++ .cache_flush = global_cache_flush, ++ .create_gatt_table = intel_i915_create_gatt_table, ++ .free_gatt_table = intel_i830_free_gatt_table, ++ .insert_memory = intel_i915_insert_entries, ++ .remove_memory = intel_i915_remove_entries, ++ .alloc_by_type = intel_i830_alloc_by_type, ++ .free_by_type = intel_i810_free_by_type, ++ .agp_alloc_page = agp_generic_alloc_page, ++ .agp_alloc_pages = agp_generic_alloc_pages, ++ .agp_destroy_page = agp_generic_destroy_page, ++ .agp_destroy_pages = agp_generic_destroy_pages, ++ .agp_type_to_mask_type = intel_i830_type_to_mask_type, ++ .chipset_flush = intel_i915_chipset_flush, ++#ifdef USE_PCI_DMA_API ++ .agp_map_page = intel_agp_map_page, ++ .agp_unmap_page = intel_agp_unmap_page, ++ .agp_map_memory = intel_agp_map_memory, ++ .agp_unmap_memory = intel_agp_unmap_memory, ++#endif ++}; +diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c +index 10f24e3..b9734a9 100644 +--- a/drivers/char/agp/nvidia-agp.c ++++ b/drivers/char/agp/nvidia-agp.c +@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = { + .aperture_sizes = nvidia_generic_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 5, ++ .needs_scratch_page = true, + .configure = nvidia_configure, + .fetch_size = nvidia_fetch_size, + .cleanup = nvidia_cleanup, +diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c +index 6c3837a..29aacd8 100644 +--- a/drivers/char/agp/sis-agp.c ++++ b/drivers/char/agp/sis-agp.c +@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = { + .aperture_sizes = sis_generic_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 7, ++ .needs_scratch_page = true, + .configure = sis_configure, + 
.fetch_size = sis_fetch_size, + .cleanup = sis_cleanup, +@@ -415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = { + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, +- { +- .class = (PCI_CLASS_BRIDGE_HOST << 8), +- .class_mask = ~0, +- .vendor = PCI_VENDOR_ID_SI, +- .device = PCI_DEVICE_ID_SI_760, +- .subvendor = PCI_ANY_ID, +- .subdevice = PCI_ANY_ID, +- }, + { } + }; + +diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c +index 6f48931..95db713 100644 +--- a/drivers/char/agp/uninorth-agp.c ++++ b/drivers/char/agp/uninorth-agp.c +@@ -28,6 +28,7 @@ + */ + static int uninorth_rev; + static int is_u3; ++static u32 scratch_value; + + #define DEFAULT_APERTURE_SIZE 256 + #define DEFAULT_APERTURE_STRING "256" +@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty + + gp = (u32 *) &agp_bridge->gatt_table[pg_start]; + for (i = 0; i < mem->page_count; ++i) { +- if (gp[i]) { ++ if (gp[i] != scratch_value) { + dev_info(&agp_bridge->dev->dev, + "uninorth_insert_memory: entry 0x%x occupied (%x)\n", + i, gp[i]); +@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) + return 0; + + gp = (u32 *) &agp_bridge->gatt_table[pg_start]; +- for (i = 0; i < mem->page_count; ++i) +- gp[i] = 0; ++ for (i = 0; i < mem->page_count; ++i) { ++ gp[i] = scratch_value; ++ } + mb(); + uninorth_tlbflush(mem); + +@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge) + + bridge->gatt_bus_addr = virt_to_phys(table); + ++ if (is_u3) ++ scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL; ++ else ++ scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) | ++ 0x1UL); + for (i = 0; i < num_entries; i++) +- bridge->gatt_table[i] = 0; ++ bridge->gatt_table[i] = scratch_value; + + return 0; + +@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver 
= { + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = agp_generic_type_to_mask_type, + .cant_use_aperture = true, ++ .needs_scratch_page = true, + }; + + const struct agp_bridge_driver u3_agp_driver = { +diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c +index d3bd243..df67e80 100644 +--- a/drivers/char/agp/via-agp.c ++++ b/drivers/char/agp/via-agp.c +@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = { + .aperture_sizes = agp3_generic_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 10, ++ .needs_scratch_page = true, + .configure = via_configure_agp3, + .fetch_size = via_fetch_size_agp3, + .cleanup = via_cleanup_agp3, +@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = { + .aperture_sizes = via_generic_sizes, + .size_type = U8_APER_SIZE, + .num_aperture_sizes = 9, ++ .needs_scratch_page = true, + .configure = via_configure, + .fetch_size = via_fetch_size, + .cleanup = via_cleanup, +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig +index 305c590..c2711c6 100644 +--- a/drivers/gpu/drm/Kconfig ++++ b/drivers/gpu/drm/Kconfig +@@ -9,6 +9,7 @@ menuconfig DRM + depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU + select I2C + select I2C_ALGOBIT ++ select SLOW_WORK + help + Kernel-level support for the Direct Rendering Infrastructure (DRI) + introduced in XFree86 4.0. If you say Y here, you need to select +@@ -59,6 +60,7 @@ config DRM_RADEON + select FW_LOADER + select DRM_KMS_HELPER + select DRM_TTM ++ select POWER_SUPPLY + help + Choose this option if you have an ATI Radeon graphics card. There + are both PCI and AGP versions. You don't need to choose this to +@@ -157,3 +159,5 @@ config DRM_SAVAGE + help + Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister + chipset. If M is selected the module will be called savage. 
++ ++source "drivers/gpu/drm/nouveau/Kconfig" +diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c +index 932b5aa..3f46772 100644 +--- a/drivers/gpu/drm/drm_auth.c ++++ b/drivers/gpu/drm/drm_auth.c +@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv, + struct drm_device *dev = master->minor->dev; + DRM_DEBUG("%d\n", magic); + +- entry = kmalloc(sizeof(*entry), GFP_KERNEL); ++ entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; +- memset(entry, 0, sizeof(*entry)); + entry->priv = priv; + entry->hash_item.key = (unsigned long)magic; + mutex_lock(&dev->struct_mutex); +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index 61b9bcf..57cea01 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -34,6 +34,7 @@ + #include "drm.h" + #include "drmP.h" + #include "drm_crtc.h" ++#include "drm_edid.h" + + struct drm_prop_enum_list { + int type; +@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector) + list_for_each_entry_safe(mode, t, &connector->user_modes, head) + drm_mode_remove(connector, mode); + +- kfree(connector->fb_helper_private); + mutex_lock(&dev->mode_config.mutex); + drm_mode_object_put(dev, &connector->base); + list_del(&connector->head); +@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev) + mutex_init(&dev->mode_config.mutex); + mutex_init(&dev->mode_config.idr_mutex); + INIT_LIST_HEAD(&dev->mode_config.fb_list); +- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); + INIT_LIST_HEAD(&dev->mode_config.crtc_list); + INIT_LIST_HEAD(&dev->mode_config.connector_list); + INIT_LIST_HEAD(&dev->mode_config.encoder_list); +@@ -1841,8 +1840,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + + ret = copy_from_user(clips, clips_ptr, + num_clips * sizeof(*clips)); +- if (ret) ++ if (ret) { ++ ret = -EFAULT; + goto out_err2; ++ } + } + + if (fb->funcs->dirty) { +@@ -2350,7 +2351,7 @@ int 
drm_mode_connector_update_edid_property(struct drm_connector *connector, + struct edid *edid) + { + struct drm_device *dev = connector->dev; +- int ret = 0; ++ int ret = 0, size; + + if (connector->edid_blob_ptr) + drm_property_destroy_blob(dev, connector->edid_blob_ptr); +@@ -2362,7 +2363,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, + return ret; + } + +- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid); ++ size = EDID_LENGTH * (1 + edid->extensions); ++ connector->edid_blob_ptr = drm_property_create_blob(connector->dev, ++ size, edid); + + ret = drm_connector_property_set_value(connector, + dev->mode_config.edid_property, +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c +index 51103aa..9b2a541 100644 +--- a/drivers/gpu/drm/drm_crtc_helper.c ++++ b/drivers/gpu/drm/drm_crtc_helper.c +@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector, + } + + /** +- * drm_helper_probe_connector_modes - get complete set of display modes ++ * drm_helper_probe_single_connector_modes - get complete set of display modes + * @dev: DRM device + * @maxX: max width for modes + * @maxY: max height for modes +@@ -154,21 +154,6 @@ prune: + } + EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); + +-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, +- uint32_t maxY) +-{ +- struct drm_connector *connector; +- int count = 0; +- +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- count += drm_helper_probe_single_connector_modes(connector, +- maxX, maxY); +- } +- +- return count; +-} +-EXPORT_SYMBOL(drm_helper_probe_connector_modes); +- + /** + * drm_helper_encoder_in_use - check if a given encoder is in use + * @encoder: encoder to check +@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) + } + EXPORT_SYMBOL(drm_helper_disable_unused_functions); + +-static struct 
drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height) +-{ +- struct drm_display_mode *mode; +- +- list_for_each_entry(mode, &connector->modes, head) { +- if (drm_mode_width(mode) > width || +- drm_mode_height(mode) > height) +- continue; +- if (mode->type & DRM_MODE_TYPE_PREFERRED) +- return mode; +- } +- return NULL; +-} +- +-static bool drm_has_cmdline_mode(struct drm_connector *connector) +-{ +- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; +- struct drm_fb_helper_cmdline_mode *cmdline_mode; +- +- if (!fb_help_conn) +- return false; +- +- cmdline_mode = &fb_help_conn->cmdline_mode; +- return cmdline_mode->specified; +-} +- +-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height) +-{ +- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; +- struct drm_fb_helper_cmdline_mode *cmdline_mode; +- struct drm_display_mode *mode = NULL; +- +- if (!fb_help_conn) +- return mode; +- +- cmdline_mode = &fb_help_conn->cmdline_mode; +- if (cmdline_mode->specified == false) +- return mode; +- +- /* attempt to find a matching mode in the list of modes +- * we have gotten so far, if not add a CVT mode that conforms +- */ +- if (cmdline_mode->rb || cmdline_mode->margins) +- goto create_mode; +- +- list_for_each_entry(mode, &connector->modes, head) { +- /* check width/height */ +- if (mode->hdisplay != cmdline_mode->xres || +- mode->vdisplay != cmdline_mode->yres) +- continue; +- +- if (cmdline_mode->refresh_specified) { +- if (mode->vrefresh != cmdline_mode->refresh) +- continue; +- } +- +- if (cmdline_mode->interlace) { +- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) +- continue; +- } +- return mode; +- } +- +-create_mode: +- mode = drm_cvt_mode(connector->dev, cmdline_mode->xres, +- cmdline_mode->yres, +- cmdline_mode->refresh_specified ? 
cmdline_mode->refresh : 60, +- cmdline_mode->rb, cmdline_mode->interlace, +- cmdline_mode->margins); +- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); +- list_add(&mode->head, &connector->modes); +- return mode; +-} +- +-static bool drm_connector_enabled(struct drm_connector *connector, bool strict) +-{ +- bool enable; +- +- if (strict) { +- enable = connector->status == connector_status_connected; +- } else { +- enable = connector->status != connector_status_disconnected; +- } +- return enable; +-} +- +-static void drm_enable_connectors(struct drm_device *dev, bool *enabled) +-{ +- bool any_enabled = false; +- struct drm_connector *connector; +- int i = 0; +- +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- enabled[i] = drm_connector_enabled(connector, true); +- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, +- enabled[i] ? "yes" : "no"); +- any_enabled |= enabled[i]; +- i++; +- } +- +- if (any_enabled) +- return; +- +- i = 0; +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- enabled[i] = drm_connector_enabled(connector, false); +- i++; +- } +-} +- +-static bool drm_target_preferred(struct drm_device *dev, +- struct drm_display_mode **modes, +- bool *enabled, int width, int height) +-{ +- struct drm_connector *connector; +- int i = 0; +- +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- +- if (enabled[i] == false) { +- i++; +- continue; +- } +- +- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", +- connector->base.id); +- +- /* got for command line mode first */ +- modes[i] = drm_pick_cmdline_mode(connector, width, height); +- if (!modes[i]) { +- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", +- connector->base.id); +- modes[i] = drm_has_preferred_mode(connector, width, height); +- } +- /* No preferred modes, pick one off the list */ +- if (!modes[i] && !list_empty(&connector->modes)) { +- list_for_each_entry(modes[i], 
&connector->modes, head) +- break; +- } +- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : +- "none"); +- i++; +- } +- return true; +-} +- +-static int drm_pick_crtcs(struct drm_device *dev, +- struct drm_crtc **best_crtcs, +- struct drm_display_mode **modes, +- int n, int width, int height) +-{ +- int c, o; +- struct drm_connector *connector; +- struct drm_connector_helper_funcs *connector_funcs; +- struct drm_encoder *encoder; +- struct drm_crtc *best_crtc; +- int my_score, best_score, score; +- struct drm_crtc **crtcs, *crtc; +- +- if (n == dev->mode_config.num_connector) +- return 0; +- c = 0; +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- if (c == n) +- break; +- c++; +- } +- +- best_crtcs[n] = NULL; +- best_crtc = NULL; +- best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height); +- if (modes[n] == NULL) +- return best_score; +- +- crtcs = kmalloc(dev->mode_config.num_connector * +- sizeof(struct drm_crtc *), GFP_KERNEL); +- if (!crtcs) +- return best_score; +- +- my_score = 1; +- if (connector->status == connector_status_connected) +- my_score++; +- if (drm_has_cmdline_mode(connector)) +- my_score++; +- if (drm_has_preferred_mode(connector, width, height)) +- my_score++; +- +- connector_funcs = connector->helper_private; +- encoder = connector_funcs->best_encoder(connector); +- if (!encoder) +- goto out; +- +- connector->encoder = encoder; +- +- /* select a crtc for this connector and then attempt to configure +- remaining connectors */ +- c = 0; +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- +- if ((encoder->possible_crtcs & (1 << c)) == 0) { +- c++; +- continue; +- } +- +- for (o = 0; o < n; o++) +- if (best_crtcs[o] == crtc) +- break; +- +- if (o < n) { +- /* ignore cloning for now */ +- c++; +- continue; +- } +- +- crtcs[n] = crtc; +- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *)); +- score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1, +- width, height); 
+- if (score > best_score) { +- best_crtc = crtc; +- best_score = score; +- memcpy(best_crtcs, crtcs, +- dev->mode_config.num_connector * +- sizeof(struct drm_crtc *)); +- } +- c++; +- } +-out: +- kfree(crtcs); +- return best_score; +-} +- +-static void drm_setup_crtcs(struct drm_device *dev) +-{ +- struct drm_crtc **crtcs; +- struct drm_display_mode **modes; +- struct drm_encoder *encoder; +- struct drm_connector *connector; +- bool *enabled; +- int width, height; +- int i, ret; +- +- DRM_DEBUG_KMS("\n"); +- +- width = dev->mode_config.max_width; +- height = dev->mode_config.max_height; +- +- /* clean out all the encoder/crtc combos */ +- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +- encoder->crtc = NULL; +- } +- +- crtcs = kcalloc(dev->mode_config.num_connector, +- sizeof(struct drm_crtc *), GFP_KERNEL); +- modes = kcalloc(dev->mode_config.num_connector, +- sizeof(struct drm_display_mode *), GFP_KERNEL); +- enabled = kcalloc(dev->mode_config.num_connector, +- sizeof(bool), GFP_KERNEL); +- +- drm_enable_connectors(dev, enabled); +- +- ret = drm_target_preferred(dev, modes, enabled, width, height); +- if (!ret) +- DRM_ERROR("Unable to find initial modes\n"); +- +- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); +- +- drm_pick_crtcs(dev, crtcs, modes, 0, width, height); +- +- i = 0; +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- struct drm_display_mode *mode = modes[i]; +- struct drm_crtc *crtc = crtcs[i]; +- +- if (connector->encoder == NULL) { +- i++; +- continue; +- } +- +- if (mode && crtc) { +- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", +- mode->name, crtc->base.id); +- crtc->desired_mode = mode; +- connector->encoder->crtc = crtc; +- } else { +- connector->encoder->crtc = NULL; +- connector->encoder = NULL; +- } +- i++; +- } +- +- kfree(crtcs); +- kfree(modes); +- kfree(enabled); +-} +- + /** + * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 
+ * @encoder: encoder to test +@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) + ret = -EINVAL; + goto fail; + } +- /* TODO are these needed? */ +- set->crtc->desired_x = set->x; +- set->crtc->desired_y = set->y; +- set->crtc->desired_mode = set->mode; + } + drm_helper_disable_unused_functions(dev); + } else if (fb_changed) { +@@ -984,63 +669,6 @@ fail: + } + EXPORT_SYMBOL(drm_crtc_helper_set_config); + +-bool drm_helper_plugged_event(struct drm_device *dev) +-{ +- DRM_DEBUG_KMS("\n"); +- +- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width, +- dev->mode_config.max_height); +- +- drm_setup_crtcs(dev); +- +- /* alert the driver fb layer */ +- dev->mode_config.funcs->fb_changed(dev); +- +- /* FIXME: send hotplug event */ +- return true; +-} +-/** +- * drm_initial_config - setup a sane initial connector configuration +- * @dev: DRM device +- * +- * LOCKING: +- * Called at init time, must take mode config lock. +- * +- * Scan the CRTCs and connectors and try to put together an initial setup. +- * At the moment, this is a cloned configuration across all heads with +- * a new framebuffer object as the backing store. +- * +- * RETURNS: +- * Zero if everything went ok, nonzero otherwise. +- */ +-bool drm_helper_initial_config(struct drm_device *dev) +-{ +- int count = 0; +- +- /* disable all the possible outputs/crtcs before entering KMS mode */ +- drm_helper_disable_unused_functions(dev); +- +- drm_fb_helper_parse_command_line(dev); +- +- count = drm_helper_probe_connector_modes(dev, +- dev->mode_config.max_width, +- dev->mode_config.max_height); +- +- /* +- * we shouldn't end up with no modes here. 
+- */ +- if (count == 0) +- printk(KERN_INFO "No connectors reported connected with modes\n"); +- +- drm_setup_crtcs(dev); +- +- /* alert the driver fb layer */ +- dev->mode_config.funcs->fb_changed(dev); +- +- return 0; +-} +-EXPORT_SYMBOL(drm_helper_initial_config); +- + static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) + { + int dpms = DRM_MODE_DPMS_OFF; +@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) + } + EXPORT_SYMBOL(drm_helper_connector_dpms); + +-/** +- * drm_hotplug_stage_two +- * @dev DRM device +- * @connector hotpluged connector +- * +- * LOCKING. +- * Caller must hold mode config lock, function might grab struct lock. +- * +- * Stage two of a hotplug. +- * +- * RETURNS: +- * Zero on success, errno on failure. +- */ +-int drm_helper_hotplug_stage_two(struct drm_device *dev) +-{ +- drm_helper_plugged_event(dev); +- +- return 0; +-} +-EXPORT_SYMBOL(drm_helper_hotplug_stage_two); +- + int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, + struct drm_mode_fb_cmd *mode_cmd) + { +@@ -1200,3 +807,114 @@ int drm_helper_resume_force_mode(struct drm_device *dev) + return 0; + } + EXPORT_SYMBOL(drm_helper_resume_force_mode); ++ ++static struct slow_work_ops output_poll_ops; ++ ++#define DRM_OUTPUT_POLL_PERIOD (10*HZ) ++static void output_poll_execute(struct slow_work *work) ++{ ++ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); ++ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work); ++ struct drm_connector *connector; ++ enum drm_connector_status old_status, status; ++ bool repoll = false, changed = false; ++ int ret; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ ++ /* if this is HPD or polled don't check it - ++ TV out for instance */ ++ if (!connector->polled) ++ continue; ++ ++ else if (connector->polled & 
(DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) ++ repoll = true; ++ ++ old_status = connector->status; ++ /* if we are connected and don't want to poll for disconnect ++ skip it */ ++ if (old_status == connector_status_connected && ++ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && ++ !(connector->polled & DRM_CONNECTOR_POLL_HPD)) ++ continue; ++ ++ status = connector->funcs->detect(connector); ++ if (old_status != status) ++ changed = true; ++ } ++ ++ mutex_unlock(&dev->mode_config.mutex); ++ ++ if (changed) { ++ /* send a uevent + call fbdev */ ++ drm_sysfs_hotplug_event(dev); ++ if (dev->mode_config.funcs->output_poll_changed) ++ dev->mode_config.funcs->output_poll_changed(dev); ++ } ++ ++ if (repoll) { ++ ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD); ++ if (ret) ++ DRM_ERROR("delayed enqueue failed %d\n", ret); ++ } ++} ++ ++void drm_kms_helper_poll_disable(struct drm_device *dev) ++{ ++ if (!dev->mode_config.poll_enabled) ++ return; ++ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); ++} ++EXPORT_SYMBOL(drm_kms_helper_poll_disable); ++ ++void drm_kms_helper_poll_enable(struct drm_device *dev) ++{ ++ bool poll = false; ++ struct drm_connector *connector; ++ int ret; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (connector->polled) ++ poll = true; ++ } ++ ++ if (poll) { ++ ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); ++ if (ret) ++ DRM_ERROR("delayed enqueue failed %d\n", ret); ++ } ++} ++EXPORT_SYMBOL(drm_kms_helper_poll_enable); ++ ++void drm_kms_helper_poll_init(struct drm_device *dev) ++{ ++ slow_work_register_user(THIS_MODULE); ++ delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, ++ &output_poll_ops); ++ dev->mode_config.poll_enabled = true; ++ ++ drm_kms_helper_poll_enable(dev); ++} ++EXPORT_SYMBOL(drm_kms_helper_poll_init); ++ ++void drm_kms_helper_poll_fini(struct drm_device *dev) ++{ 
++ drm_kms_helper_poll_disable(dev); ++ slow_work_unregister_user(THIS_MODULE); ++} ++EXPORT_SYMBOL(drm_kms_helper_poll_fini); ++ ++void drm_helper_hpd_irq_event(struct drm_device *dev) ++{ ++ if (!dev->mode_config.poll_enabled) ++ return; ++ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); ++ /* schedule a slow work asap */ ++ delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0); ++} ++EXPORT_SYMBOL(drm_helper_hpd_irq_event); ++ ++static struct slow_work_ops output_poll_ops = { ++ .execute = output_poll_execute, ++}; +diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c +index 13f1537..252cbd7 100644 +--- a/drivers/gpu/drm/drm_dma.c ++++ b/drivers/gpu/drm/drm_dma.c +@@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev) + { + int i; + +- dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL); ++ dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); + if (!dev->dma) + return -ENOMEM; + +- memset(dev->dma, 0, sizeof(*dev->dma)); +- + for (i = 0; i <= DRM_MAX_ORDER; i++) + memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); + +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index 18f41d7..c198186 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -2,6 +2,7 @@ + * Copyright (c) 2006 Luc Verhaegen (quirks list) + * Copyright (c) 2007-2008 Intel Corporation + * Jesse Barnes ++ * Copyright 2010 Red Hat, Inc. + * + * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from + * FB layer. +@@ -33,10 +34,9 @@ + #include "drmP.h" + #include "drm_edid.h" + +-/* +- * TODO: +- * - support EDID 1.4 (incl. 
CE blocks) +- */ ++#define EDID_EST_TIMINGS 16 ++#define EDID_STD_TIMINGS 8 ++#define EDID_DETAILED_TIMINGS 4 + + /* + * EDID blocks out in the wild have a variety of bugs, try to collect +@@ -65,7 +65,8 @@ + + #define LEVEL_DMT 0 + #define LEVEL_GTF 1 +-#define LEVEL_CVT 2 ++#define LEVEL_GTF2 2 ++#define LEVEL_CVT 3 + + static struct edid_quirk { + char *vendor; +@@ -109,51 +110,64 @@ static struct edid_quirk { + { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, + }; + ++/*** DDC fetch and block validation ***/ + +-/* Valid EDID header has these bytes */ + static const u8 edid_header[] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 + }; + +-/** +- * drm_edid_is_valid - sanity check EDID data +- * @edid: EDID data +- * +- * Sanity check the EDID block by looking at the header, the version number +- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's +- * valid. ++/* ++ * Sanity check the EDID block (base or extension). Return 0 if the block ++ * doesn't check out, or 1 if it's valid. 
+ */ +-bool drm_edid_is_valid(struct edid *edid) ++static bool ++drm_edid_block_valid(u8 *raw_edid) + { +- int i, score = 0; ++ int i; + u8 csum = 0; +- u8 *raw_edid = (u8 *)edid; ++ struct edid *edid = (struct edid *)raw_edid; ++ ++ if (raw_edid[0] == 0x00) { ++ int score = 0; + +- for (i = 0; i < sizeof(edid_header); i++) +- if (raw_edid[i] == edid_header[i]) +- score++; ++ for (i = 0; i < sizeof(edid_header); i++) ++ if (raw_edid[i] == edid_header[i]) ++ score++; + +- if (score == 8) ; +- else if (score >= 6) { +- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); +- memcpy(raw_edid, edid_header, sizeof(edid_header)); +- } else +- goto bad; ++ if (score == 8) ; ++ else if (score >= 6) { ++ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); ++ memcpy(raw_edid, edid_header, sizeof(edid_header)); ++ } else { ++ goto bad; ++ } ++ } + + for (i = 0; i < EDID_LENGTH; i++) + csum += raw_edid[i]; + if (csum) { + DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); +- goto bad; +- } + +- if (edid->version != 1) { +- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); +- goto bad; ++ /* allow CEA to slide through, switches mangle this */ ++ if (raw_edid[0] != 0x02) ++ goto bad; + } + +- if (edid->revision > 4) +- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); ++ /* per-block-type checks */ ++ switch (raw_edid[0]) { ++ case 0: /* base */ ++ if (edid->version != 1) { ++ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); ++ goto bad; ++ } ++ ++ if (edid->revision > 4) ++ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); ++ break; ++ ++ default: ++ break; ++ } + + return 1; + +@@ -165,8 +179,158 @@ bad: + } + return 0; + } ++ ++/** ++ * drm_edid_is_valid - sanity check EDID data ++ * @edid: EDID data ++ * ++ * Sanity-check an entire EDID record (including extensions) ++ */ ++bool drm_edid_is_valid(struct edid *edid) ++{ ++ int i; ++ u8 *raw = (u8 *)edid; ++ ++ if 
(!edid) ++ return false; ++ ++ for (i = 0; i <= edid->extensions; i++) ++ if (!drm_edid_block_valid(raw + i * EDID_LENGTH)) ++ return false; ++ ++ return true; ++} + EXPORT_SYMBOL(drm_edid_is_valid); + ++#define DDC_ADDR 0x50 ++#define DDC_SEGMENT_ADDR 0x30 ++/** ++ * Get EDID information via I2C. ++ * ++ * \param adapter : i2c device adaptor ++ * \param buf : EDID data buffer to be filled ++ * \param len : EDID data buffer length ++ * \return 0 on success or -1 on failure. ++ * ++ * Try to fetch EDID information by calling i2c driver function. ++ */ ++static int ++drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, ++ int block, int len) ++{ ++ unsigned char start = block * EDID_LENGTH; ++ struct i2c_msg msgs[] = { ++ { ++ .addr = DDC_ADDR, ++ .flags = 0, ++ .len = 1, ++ .buf = &start, ++ }, { ++ .addr = DDC_ADDR, ++ .flags = I2C_M_RD, ++ .len = len, ++ .buf = buf + start, ++ } ++ }; ++ ++ if (i2c_transfer(adapter, msgs, 2) == 2) ++ return 0; ++ ++ return -1; ++} ++ ++static u8 * ++drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) ++{ ++ int i, j = 0; ++ u8 *block, *new; ++ ++ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) ++ return NULL; ++ ++ /* base block fetch */ ++ for (i = 0; i < 4; i++) { ++ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) ++ goto out; ++ if (drm_edid_block_valid(block)) ++ break; ++ } ++ if (i == 4) ++ goto carp; ++ ++ /* if there's no extensions, we're done */ ++ if (block[0x7e] == 0) ++ return block; ++ ++ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); ++ if (!new) ++ goto out; ++ block = new; ++ ++ for (j = 1; j <= block[0x7e]; j++) { ++ for (i = 0; i < 4; i++) { ++ if (drm_do_probe_ddc_edid(adapter, block, j, ++ EDID_LENGTH)) ++ goto out; ++ if (drm_edid_block_valid(block + j * EDID_LENGTH)) ++ break; ++ } ++ if (i == 4) ++ goto carp; ++ } ++ ++ return block; ++ ++carp: ++ dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n", ++ 
drm_get_connector_name(connector), j); ++ ++out: ++ kfree(block); ++ return NULL; ++} ++ ++/** ++ * Probe DDC presence. ++ * ++ * \param adapter : i2c device adaptor ++ * \return 1 on success ++ */ ++static bool ++drm_probe_ddc(struct i2c_adapter *adapter) ++{ ++ unsigned char out; ++ ++ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); ++} ++ ++/** ++ * drm_get_edid - get EDID data, if available ++ * @connector: connector we're probing ++ * @adapter: i2c adapter to use for DDC ++ * ++ * Poke the given i2c channel to grab EDID data if possible. If found, ++ * attach it to the connector. ++ * ++ * Return edid data or NULL if we couldn't find any. ++ */ ++struct edid *drm_get_edid(struct drm_connector *connector, ++ struct i2c_adapter *adapter) ++{ ++ struct edid *edid = NULL; ++ ++ if (drm_probe_ddc(adapter)) ++ edid = (struct edid *)drm_do_get_edid(connector, adapter); ++ ++ connector->display_info.raw_edid = (char *)edid; ++ ++ return edid; ++ ++} ++EXPORT_SYMBOL(drm_get_edid); ++ ++/*** EDID parsing ***/ ++ + /** + * edid_vendor - match a string against EDID's obfuscated vendor field + * @edid: EDID to match +@@ -335,7 +499,7 @@ static struct drm_display_mode drm_dmt_modes[] = { + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1024x768@85Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, +- 1072, 1376, 0, 768, 769, 772, 808, 0, ++ 1168, 1376, 0, 768, 769, 772, 808, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1152x864@75Hz */ + { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, +@@ -426,7 +590,7 @@ static struct drm_display_mode drm_dmt_modes[] = { + 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1600x1200@75Hz */ +- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664, ++ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664, + 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 
1600x1200@85Hz */ +@@ -497,8 +661,8 @@ static struct drm_display_mode drm_dmt_modes[] = { + static const int drm_num_dmt_modes = + sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); + +-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, +- int hsize, int vsize, int fresh) ++struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, ++ int hsize, int vsize, int fresh) + { + int i; + struct drm_display_mode *ptr, *mode; +@@ -516,6 +680,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, + } + return mode; + } ++EXPORT_SYMBOL(drm_mode_find_dmt); ++ ++typedef void detailed_cb(struct detailed_timing *timing, void *closure); ++ ++static void ++drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) ++{ ++ int i; ++ struct edid *edid = (struct edid *)raw_edid; ++ ++ if (edid == NULL) ++ return; ++ ++ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) ++ cb(&(edid->detailed_timings[i]), closure); ++ ++ /* XXX extension block walk */ ++} ++ ++static void ++is_rb(struct detailed_timing *t, void *data) ++{ ++ u8 *r = (u8 *)t; ++ if (r[3] == EDID_DETAIL_MONITOR_RANGE) ++ if (r[15] & 0x10) ++ *(bool *)data = true; ++} ++ ++/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ ++static bool ++drm_monitor_supports_rb(struct edid *edid) ++{ ++ if (edid->revision >= 4) { ++ bool ret; ++ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); ++ return ret; ++ } ++ ++ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); ++} ++ ++static void ++find_gtf2(struct detailed_timing *t, void *data) ++{ ++ u8 *r = (u8 *)t; ++ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) ++ *(u8 **)data = r; ++} ++ ++/* Secondary GTF curve kicks in above some break frequency */ ++static int ++drm_gtf2_hbreak(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? 
(r[12] * 2) : 0; ++} ++ ++static int ++drm_gtf2_2c(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? r[13] : 0; ++} ++ ++static int ++drm_gtf2_m(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? (r[15] << 8) + r[14] : 0; ++} ++ ++static int ++drm_gtf2_k(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? r[16] : 0; ++} ++ ++static int ++drm_gtf2_2j(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? r[17] : 0; ++} ++ ++/** ++ * standard_timing_level - get std. timing level(CVT/GTF/DMT) ++ * @edid: EDID block to scan ++ */ ++static int standard_timing_level(struct edid *edid) ++{ ++ if (edid->revision >= 2) { ++ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) ++ return LEVEL_CVT; ++ if (drm_gtf2_hbreak(edid)) ++ return LEVEL_GTF2; ++ return LEVEL_GTF; ++ } ++ return LEVEL_DMT; ++} + + /* + * 0 is reserved. The spec says 0x01 fill for unused timings. Some old +@@ -536,22 +805,20 @@ bad_std_timing(u8 a, u8 b) + * + * Take the standard timing params (in this case width, aspect, and refresh) + * and convert them into a real mode using CVT/GTF/DMT. +- * +- * Punts for now, but should eventually use the FB layer's CVT based mode +- * generation code. 
+ */ +-struct drm_display_mode *drm_mode_std(struct drm_device *dev, +- struct std_timing *t, +- int revision, +- int timing_level) ++static struct drm_display_mode * ++drm_mode_std(struct drm_connector *connector, struct edid *edid, ++ struct std_timing *t, int revision) + { +- struct drm_display_mode *mode; ++ struct drm_device *dev = connector->dev; ++ struct drm_display_mode *m, *mode = NULL; + int hsize, vsize; + int vrefresh_rate; + unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) + >> EDID_TIMING_ASPECT_SHIFT; + unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) + >> EDID_TIMING_VFREQ_SHIFT; ++ int timing_level = standard_timing_level(edid); + + if (bad_std_timing(t->hsize, t->vfreq_aspect)) + return NULL; +@@ -572,18 +839,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, + vsize = (hsize * 4) / 5; + else + vsize = (hsize * 9) / 16; +- /* HDTV hack */ +- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { +- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, ++ ++ /* HDTV hack, part 1 */ ++ if (vrefresh_rate == 60 && ++ ((hsize == 1360 && vsize == 765) || ++ (hsize == 1368 && vsize == 769))) { ++ hsize = 1366; ++ vsize = 768; ++ } ++ ++ /* ++ * If this connector already has a mode for this size and refresh ++ * rate (because it came from detailed or CVT info), use that ++ * instead. This way we don't have to guess at interlace or ++ * reduced blanking. 
++ */ ++ list_for_each_entry(m, &connector->probed_modes, head) ++ if (m->hdisplay == hsize && m->vdisplay == vsize && ++ drm_mode_vrefresh(m) == vrefresh_rate) ++ return NULL; ++ ++ /* HDTV hack, part 2 */ ++ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { ++ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, + false); + mode->hdisplay = 1366; + mode->vsync_start = mode->vsync_start - 1; + mode->vsync_end = mode->vsync_end - 1; + return mode; + } +- mode = NULL; ++ + /* check whether it can be found in default mode table */ +- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate); ++ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate); + if (mode) + return mode; + +@@ -593,6 +880,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, + case LEVEL_GTF: + mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); + break; ++ case LEVEL_GTF2: ++ /* ++ * This is potentially wrong if there's ever a monitor with ++ * more than one ranges section, each claiming a different ++ * secondary GTF curve. Please don't do that. 
++ */ ++ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); ++ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { ++ kfree(mode); ++ mode = drm_gtf_mode_complex(dev, hsize, vsize, ++ vrefresh_rate, 0, 0, ++ drm_gtf2_m(edid), ++ drm_gtf2_2c(edid), ++ drm_gtf2_k(edid), ++ drm_gtf2_2j(edid)); ++ } ++ break; + case LEVEL_CVT: + mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, + false); +@@ -716,10 +1020,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, + if (mode->vsync_end > mode->vtotal) + mode->vtotal = mode->vsync_end + 1; + +- drm_mode_set_name(mode); +- + drm_mode_do_interlace_quirk(mode, pt); + ++ drm_mode_set_name(mode); ++ + if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { + pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; + } +@@ -802,10 +1106,6 @@ static struct drm_display_mode edid_est_modes[] = { + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ + }; + +-#define EDID_EST_TIMINGS 16 +-#define EDID_STD_TIMINGS 8 +-#define EDID_DETAILED_TIMINGS 4 +- + /** + * add_established_modes - get est. modes from EDID and add them + * @edid: EDID block to scan +@@ -833,19 +1133,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e + + return modes; + } +-/** +- * stanard_timing_level - get std. timing level(CVT/GTF/DMT) +- * @edid: EDID block to scan +- */ +-static int standard_timing_level(struct edid *edid) +-{ +- if (edid->revision >= 2) { +- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) +- return LEVEL_CVT; +- return LEVEL_GTF; +- } +- return LEVEL_DMT; +-} + + /** + * add_standard_modes - get std. 
modes from EDID and add them +@@ -856,22 +1143,14 @@ static int standard_timing_level(struct edid *edid) + */ + static int add_standard_modes(struct drm_connector *connector, struct edid *edid) + { +- struct drm_device *dev = connector->dev; + int i, modes = 0; +- int timing_level; +- +- timing_level = standard_timing_level(edid); + + for (i = 0; i < EDID_STD_TIMINGS; i++) { +- struct std_timing *t = &edid->standard_timings[i]; + struct drm_display_mode *newmode; + +- /* If std timings bytes are 1, 1 it's empty */ +- if (t->hsize == 1 && t->vfreq_aspect == 1) +- continue; +- +- newmode = drm_mode_std(dev, &edid->standard_timings[i], +- edid->revision, timing_level); ++ newmode = drm_mode_std(connector, edid, ++ &edid->standard_timings[i], ++ edid->revision); + if (newmode) { + drm_mode_probed_add(connector, newmode); + modes++; +@@ -881,36 +1160,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid + return modes; + } + +-/* +- * XXX fix this for: +- * - GTF secondary curve formula +- * - EDID 1.4 range offsets +- * - CVT extended bits +- */ + static bool +-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) ++mode_is_rb(struct drm_display_mode *mode) + { +- struct detailed_data_monitor_range *range; +- int hsync, vrefresh; +- +- range = &timing->data.other_data.data.range; ++ return (mode->htotal - mode->hdisplay == 160) && ++ (mode->hsync_end - mode->hdisplay == 80) && ++ (mode->hsync_end - mode->hsync_start == 32) && ++ (mode->vsync_start - mode->vdisplay == 3); ++} + ++static bool ++mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) ++{ ++ int hsync, hmin, hmax; ++ ++ hmin = t[7]; ++ if (edid->revision >= 4) ++ hmin += ((t[4] & 0x04) ? 255 : 0); ++ hmax = t[8]; ++ if (edid->revision >= 4) ++ hmax += ((t[4] & 0x08) ? 
255 : 0); + hsync = drm_mode_hsync(mode); +- vrefresh = drm_mode_vrefresh(mode); + +- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz) ++ return (hsync <= hmax && hsync >= hmin); ++} ++ ++static bool ++mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) ++{ ++ int vsync, vmin, vmax; ++ ++ vmin = t[5]; ++ if (edid->revision >= 4) ++ vmin += ((t[4] & 0x01) ? 255 : 0); ++ vmax = t[6]; ++ if (edid->revision >= 4) ++ vmax += ((t[4] & 0x02) ? 255 : 0); ++ vsync = drm_mode_vrefresh(mode); ++ ++ return (vsync <= vmax && vsync >= vmin); ++} ++ ++static u32 ++range_pixel_clock(struct edid *edid, u8 *t) ++{ ++ /* unspecified */ ++ if (t[9] == 0 || t[9] == 255) ++ return 0; ++ ++ /* 1.4 with CVT support gives us real precision, yay */ ++ if (edid->revision >= 4 && t[10] == 0x04) ++ return (t[9] * 10000) - ((t[12] >> 2) * 250); ++ ++ /* 1.3 is pathetic, so fuzz up a bit */ ++ return t[9] * 10000 + 5001; ++} ++ ++static bool ++mode_in_range(struct drm_display_mode *mode, struct edid *edid, ++ struct detailed_timing *timing) ++{ ++ u32 max_clock; ++ u8 *t = (u8 *)timing; ++ ++ if (!mode_in_hsync_range(mode, edid, t)) + return false; + +- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq) ++ if (!mode_in_vsync_range(mode, edid, t)) + return false; + +- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) { +- /* be forgiving since it's in units of 10MHz */ +- int max_clock = range->pixel_clock_mhz * 10 + 9; +- max_clock *= 1000; ++ if ((max_clock = range_pixel_clock(edid, t))) + if (mode->clock > max_clock) + return false; +- } ++ ++ /* 1.4 max horizontal check */ ++ if (edid->revision >= 4 && t[10] == 0x04) ++ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3)))) ++ return false; ++ ++ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid)) ++ return false; + + return true; + } +@@ -919,15 +1248,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) + * XXX If drm_dmt_modes ever 
regrows the CVT-R modes (and it will) this will + * need to account for them. + */ +-static int drm_gtf_modes_for_range(struct drm_connector *connector, +- struct detailed_timing *timing) ++static int ++drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, ++ struct detailed_timing *timing) + { + int i, modes = 0; + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; + + for (i = 0; i < drm_num_dmt_modes; i++) { +- if (mode_in_range(drm_dmt_modes + i, timing)) { ++ if (mode_in_range(drm_dmt_modes + i, edid, timing)) { + newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); + if (newmode) { + drm_mode_probed_add(connector, newmode); +@@ -988,13 +1318,100 @@ static int drm_cvt_modes(struct drm_connector *connector, + return modes; + } + ++static const struct { ++ short w; ++ short h; ++ short r; ++ short rb; ++} est3_modes[] = { ++ /* byte 6 */ ++ { 640, 350, 85, 0 }, ++ { 640, 400, 85, 0 }, ++ { 720, 400, 85, 0 }, ++ { 640, 480, 85, 0 }, ++ { 848, 480, 60, 0 }, ++ { 800, 600, 85, 0 }, ++ { 1024, 768, 85, 0 }, ++ { 1152, 864, 75, 0 }, ++ /* byte 7 */ ++ { 1280, 768, 60, 1 }, ++ { 1280, 768, 60, 0 }, ++ { 1280, 768, 75, 0 }, ++ { 1280, 768, 85, 0 }, ++ { 1280, 960, 60, 0 }, ++ { 1280, 960, 85, 0 }, ++ { 1280, 1024, 60, 0 }, ++ { 1280, 1024, 85, 0 }, ++ /* byte 8 */ ++ { 1360, 768, 60, 0 }, ++ { 1440, 900, 60, 1 }, ++ { 1440, 900, 60, 0 }, ++ { 1440, 900, 75, 0 }, ++ { 1440, 900, 85, 0 }, ++ { 1400, 1050, 60, 1 }, ++ { 1400, 1050, 60, 0 }, ++ { 1400, 1050, 75, 0 }, ++ /* byte 9 */ ++ { 1400, 1050, 85, 0 }, ++ { 1680, 1050, 60, 1 }, ++ { 1680, 1050, 60, 0 }, ++ { 1680, 1050, 75, 0 }, ++ { 1680, 1050, 85, 0 }, ++ { 1600, 1200, 60, 0 }, ++ { 1600, 1200, 65, 0 }, ++ { 1600, 1200, 70, 0 }, ++ /* byte 10 */ ++ { 1600, 1200, 75, 0 }, ++ { 1600, 1200, 85, 0 }, ++ { 1792, 1344, 60, 0 }, ++ { 1792, 1344, 85, 0 }, ++ { 1856, 1392, 60, 0 }, ++ { 1856, 1392, 75, 0 }, ++ { 1920, 1200, 60, 1 }, ++ { 1920, 1200, 60, 0 }, ++ /* byte 11 */ 
++ { 1920, 1200, 75, 0 }, ++ { 1920, 1200, 85, 0 }, ++ { 1920, 1440, 60, 0 }, ++ { 1920, 1440, 75, 0 }, ++}; ++static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]); ++ ++static int ++drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) ++{ ++ int i, j, m, modes = 0; ++ struct drm_display_mode *mode; ++ u8 *est = ((u8 *)timing) + 5; ++ ++ for (i = 0; i < 6; i++) { ++ for (j = 7; j > 0; j--) { ++ m = (i * 8) + (7 - j); ++ if (m >= num_est3_modes) ++ break; ++ if (est[i] & (1 << j)) { ++ mode = drm_mode_find_dmt(connector->dev, ++ est3_modes[m].w, ++ est3_modes[m].h, ++ est3_modes[m].r ++ /*, est3_modes[m].rb */); ++ if (mode) { ++ drm_mode_probed_add(connector, mode); ++ modes++; ++ } ++ } ++ } ++ } ++ ++ return modes; ++} ++ + static int add_detailed_modes(struct drm_connector *connector, + struct detailed_timing *timing, + struct edid *edid, u32 quirks, int preferred) + { + int i, modes = 0; + struct detailed_non_pixel *data = &timing->data.other_data; +- int timing_level = standard_timing_level(edid); + int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; +@@ -1015,7 +1432,8 @@ static int add_detailed_modes(struct drm_connector *connector, + switch (data->type) { + case EDID_DETAIL_MONITOR_RANGE: + if (gtf) +- modes += drm_gtf_modes_for_range(connector, timing); ++ modes += drm_gtf_modes_for_range(connector, edid, ++ timing); + break; + case EDID_DETAIL_STD_MODES: + /* Six modes per detailed section */ +@@ -1024,8 +1442,8 @@ static int add_detailed_modes(struct drm_connector *connector, + struct drm_display_mode *newmode; + + std = &data->data.timings[i]; +- newmode = drm_mode_std(dev, std, edid->revision, +- timing_level); ++ newmode = drm_mode_std(connector, edid, std, ++ edid->revision); + if (newmode) { + drm_mode_probed_add(connector, newmode); + modes++; +@@ -1035,6 +1453,9 @@ static int add_detailed_modes(struct 
drm_connector *connector, + case EDID_DETAIL_CVT_3BYTE: + modes += drm_cvt_modes(connector, timing); + break; ++ case EDID_DETAIL_EST_TIMINGS: ++ modes += drm_est3_modes(connector, timing); ++ break; + default: + break; + } +@@ -1058,7 +1479,10 @@ static int add_detailed_info(struct drm_connector *connector, + + for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { + struct detailed_timing *timing = &edid->detailed_timings[i]; +- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); ++ int preferred = (i == 0); ++ ++ if (preferred && edid->version == 1 && edid->revision < 4) ++ preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); + + /* In 1.0, only timings are allowed */ + if (!timing->pixel_clock && edid->version == 1 && +@@ -1088,39 +1512,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector, + int i, modes = 0; + char *edid_ext = NULL; + struct detailed_timing *timing; +- int edid_ext_num; + int start_offset, end_offset; +- int timing_level; + +- if (edid->version == 1 && edid->revision < 3) { +- /* If the EDID version is less than 1.3, there is no +- * extension EDID. +- */ ++ if (edid->version == 1 && edid->revision < 3) + return 0; +- } +- if (!edid->extensions) { +- /* if there is no extension EDID, it is unnecessary to +- * parse the E-EDID to get detailed info +- */ ++ if (!edid->extensions) + return 0; +- } +- +- /* Chose real EDID extension number */ +- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ? 
+- DRM_MAX_EDID_EXT_NUM : edid->extensions; + + /* Find CEA extension */ +- for (i = 0; i < edid_ext_num; i++) { ++ for (i = 0; i < edid->extensions; i++) { + edid_ext = (char *)edid + EDID_LENGTH * (i + 1); +- /* This block is CEA extension */ + if (edid_ext[0] == 0x02) + break; + } + +- if (i == edid_ext_num) { +- /* if there is no additional timing EDID block, return */ ++ if (i == edid->extensions) + return 0; +- } + + /* Get the start offset of detailed timing block */ + start_offset = edid_ext[2]; +@@ -1132,7 +1539,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, + return 0; + } + +- timing_level = standard_timing_level(edid); + end_offset = EDID_LENGTH; + end_offset -= sizeof(struct detailed_timing); + for (i = start_offset; i < end_offset; +@@ -1144,123 +1550,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, + return modes; + } + +-#define DDC_ADDR 0x50 +-/** +- * Get EDID information via I2C. +- * +- * \param adapter : i2c device adaptor +- * \param buf : EDID data buffer to be filled +- * \param len : EDID data buffer length +- * \return 0 on success or -1 on failure. +- * +- * Try to fetch EDID information by calling i2c driver function. 
+- */ +-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, +- unsigned char *buf, int len) +-{ +- unsigned char start = 0x0; +- struct i2c_msg msgs[] = { +- { +- .addr = DDC_ADDR, +- .flags = 0, +- .len = 1, +- .buf = &start, +- }, { +- .addr = DDC_ADDR, +- .flags = I2C_M_RD, +- .len = len, +- .buf = buf, +- } +- }; +- +- if (i2c_transfer(adapter, msgs, 2) == 2) +- return 0; +- +- return -1; +-} +-EXPORT_SYMBOL(drm_do_probe_ddc_edid); +- +-static int drm_ddc_read_edid(struct drm_connector *connector, +- struct i2c_adapter *adapter, +- char *buf, int len) +-{ +- int i; +- +- for (i = 0; i < 4; i++) { +- if (drm_do_probe_ddc_edid(adapter, buf, len)) +- return -1; +- if (drm_edid_is_valid((struct edid *)buf)) +- return 0; +- } +- +- /* repeated checksum failures; warn, but carry on */ +- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", +- drm_get_connector_name(connector)); +- return -1; +-} +- +-/** +- * drm_get_edid - get EDID data, if available +- * @connector: connector we're probing +- * @adapter: i2c adapter to use for DDC +- * +- * Poke the given connector's i2c channel to grab EDID data if possible. +- * +- * Return edid data or NULL if we couldn't find any. 
+- */ +-struct edid *drm_get_edid(struct drm_connector *connector, +- struct i2c_adapter *adapter) +-{ +- int ret; +- struct edid *edid; +- +- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), +- GFP_KERNEL); +- if (edid == NULL) { +- dev_warn(&connector->dev->pdev->dev, +- "Failed to allocate EDID\n"); +- goto end; +- } +- +- /* Read first EDID block */ +- ret = drm_ddc_read_edid(connector, adapter, +- (unsigned char *)edid, EDID_LENGTH); +- if (ret != 0) +- goto clean_up; +- +- /* There are EDID extensions to be read */ +- if (edid->extensions != 0) { +- int edid_ext_num = edid->extensions; +- +- if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) { +- dev_warn(&connector->dev->pdev->dev, +- "The number of extension(%d) is " +- "over max (%d), actually read number (%d)\n", +- edid_ext_num, DRM_MAX_EDID_EXT_NUM, +- DRM_MAX_EDID_EXT_NUM); +- /* Reset EDID extension number to be read */ +- edid_ext_num = DRM_MAX_EDID_EXT_NUM; +- } +- /* Read EDID including extensions too */ +- ret = drm_ddc_read_edid(connector, adapter, (char *)edid, +- EDID_LENGTH * (edid_ext_num + 1)); +- if (ret != 0) +- goto clean_up; +- +- } +- +- connector->display_info.raw_edid = (char *)edid; +- goto end; +- +-clean_up: +- kfree(edid); +- edid = NULL; +-end: +- return edid; +- +-} +-EXPORT_SYMBOL(drm_get_edid); +- + #define HDMI_IDENTIFIER 0x000C03 + #define VENDOR_BLOCK 0x03 + /** +@@ -1273,7 +1562,7 @@ EXPORT_SYMBOL(drm_get_edid); + bool drm_detect_hdmi_monitor(struct edid *edid) + { + char *edid_ext = NULL; +- int i, hdmi_id, edid_ext_num; ++ int i, hdmi_id; + int start_offset, end_offset; + bool is_hdmi = false; + +@@ -1281,19 +1570,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid) + if (edid == NULL || edid->extensions == 0) + goto end; + +- /* Chose real EDID extension number */ +- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ? 
+- DRM_MAX_EDID_EXT_NUM : edid->extensions; +- + /* Find CEA extension */ +- for (i = 0; i < edid_ext_num; i++) { ++ for (i = 0; i < edid->extensions; i++) { + edid_ext = (char *)edid + EDID_LENGTH * (i + 1); + /* This block is CEA extension */ + if (edid_ext[0] == 0x02) + break; + } + +- if (i == edid_ext_num) ++ if (i == edid->extensions) + goto end; + + /* Data block offset in CEA extension block */ +@@ -1348,10 +1633,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) + + quirks = edid_get_quirks(edid); + +- num_modes += add_established_modes(connector, edid); +- num_modes += add_standard_modes(connector, edid); ++ /* ++ * EDID spec says modes should be preferred in this order: ++ * - preferred detailed mode ++ * - other detailed modes from base block ++ * - detailed modes from extension blocks ++ * - CVT 3-byte code modes ++ * - standard timing codes ++ * - established timing codes ++ * - modes inferred from GTF or CVT range information ++ * ++ * We don't quite implement this yet, but we're close. ++ * ++ * XXX order for additional mode types in extension blocks? 
++ */ + num_modes += add_detailed_info(connector, edid, quirks); + num_modes += add_detailed_info_eedid(connector, edid, quirks); ++ num_modes += add_standard_modes(connector, edid); ++ num_modes += add_established_modes(connector, edid); + + if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) + edid_fixup_preferred(connector, quirks); +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c +index 288ea2f..08c4c92 100644 +--- a/drivers/gpu/drm/drm_fb_helper.c ++++ b/drivers/gpu/drm/drm_fb_helper.c +@@ -42,15 +42,33 @@ MODULE_LICENSE("GPL and additional rights"); + + static LIST_HEAD(kernel_fb_helper_list); + +-int drm_fb_helper_add_connector(struct drm_connector *connector) ++/* simple single crtc case helper function */ ++int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) + { +- connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); +- if (!connector->fb_helper_private) +- return -ENOMEM; ++ struct drm_device *dev = fb_helper->dev; ++ struct drm_connector *connector; ++ int i; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ struct drm_fb_helper_connector *fb_helper_connector; ++ ++ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); ++ if (!fb_helper_connector) ++ goto fail; + ++ fb_helper_connector->connector = connector; ++ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; ++ } + return 0; ++fail: ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ kfree(fb_helper->connector_info[i]); ++ fb_helper->connector_info[i] = NULL; ++ } ++ fb_helper->connector_count = 0; ++ return -ENOMEM; + } +-EXPORT_SYMBOL(drm_fb_helper_add_connector); ++EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); + + /** + * drm_fb_helper_connector_parse_command_line - parse command line for connector +@@ -65,7 +83,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector); + * + * enable/enable 
Digital/disable bit at the end + */ +-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector, ++static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn, + const char *mode_option) + { + const char *name; +@@ -75,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con + int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0; + int i; + enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; +- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; + struct drm_fb_helper_cmdline_mode *cmdline_mode; ++ struct drm_connector *connector = fb_helper_conn->connector; + +- if (!fb_help_conn) ++ if (!fb_helper_conn) + return false; + +- cmdline_mode = &fb_help_conn->cmdline_mode; ++ cmdline_mode = &fb_helper_conn->cmdline_mode; + if (!mode_option) + mode_option = fb_mode_option; + +@@ -204,18 +222,21 @@ done: + return true; + } + +-int drm_fb_helper_parse_command_line(struct drm_device *dev) ++static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) + { +- struct drm_connector *connector; ++ struct drm_fb_helper_connector *fb_helper_conn; ++ int i; + +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ for (i = 0; i < fb_helper->connector_count; i++) { + char *option = NULL; + ++ fb_helper_conn = fb_helper->connector_info[i]; ++ + /* do something on return - turn off connector maybe */ +- if (fb_get_options(drm_get_connector_name(connector), &option)) ++ if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option)) + continue; + +- drm_fb_helper_connector_parse_command_line(connector, option); ++ drm_fb_helper_connector_parse_command_line(fb_helper_conn, option); + } + return 0; + } +@@ -243,7 +264,7 @@ bool drm_fb_helper_force_kernel_mode(void) + int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, + void *panic_str) + { +- DRM_ERROR("panic 
occurred, switching back to text console\n"); ++ printk(KERN_ERR "panic occurred, switching back to text console\n"); + return drm_fb_helper_force_kernel_mode(); + return 0; + } +@@ -293,6 +314,7 @@ static void drm_fb_helper_on(struct fb_info *info) + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct drm_crtc *crtc; ++ struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_encoder *encoder; + int i; + +@@ -300,33 +322,28 @@ static void drm_fb_helper_on(struct fb_info *info) + * For each CRTC in this fb, turn the crtc on then, + * find all associated encoders and turn them on. + */ ++ mutex_lock(&dev->mode_config.mutex); + for (i = 0; i < fb_helper->crtc_count; i++) { +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- struct drm_crtc_helper_funcs *crtc_funcs = +- crtc->helper_private; ++ crtc = fb_helper->crtc_info[i].mode_set.crtc; ++ crtc_funcs = crtc->helper_private; + +- /* Only mess with CRTCs in this fb */ +- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || +- !crtc->enabled) +- continue; ++ if (!crtc->enabled) ++ continue; + +- mutex_lock(&dev->mode_config.mutex); +- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); +- mutex_unlock(&dev->mode_config.mutex); ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); + +- /* Found a CRTC on this fb, now find encoders */ +- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +- if (encoder->crtc == crtc) { +- struct drm_encoder_helper_funcs *encoder_funcs; + +- encoder_funcs = encoder->helper_private; +- mutex_lock(&dev->mode_config.mutex); +- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); +- mutex_unlock(&dev->mode_config.mutex); +- } ++ /* Found a CRTC on this fb, now find encoders */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->crtc == crtc) { ++ struct drm_encoder_helper_funcs *encoder_funcs; ++ ++ encoder_funcs = encoder->helper_private; ++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); + } + } + 
} ++ mutex_unlock(&dev->mode_config.mutex); + } + + static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) +@@ -334,6 +351,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct drm_crtc *crtc; ++ struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_encoder *encoder; + int i; + +@@ -341,32 +359,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) + * For each CRTC in this fb, find all associated encoders + * and turn them off, then turn off the CRTC. + */ ++ mutex_lock(&dev->mode_config.mutex); + for (i = 0; i < fb_helper->crtc_count; i++) { +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- struct drm_crtc_helper_funcs *crtc_funcs = +- crtc->helper_private; ++ crtc = fb_helper->crtc_info[i].mode_set.crtc; ++ crtc_funcs = crtc->helper_private; + +- /* Only mess with CRTCs in this fb */ +- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || +- !crtc->enabled) +- continue; ++ if (!crtc->enabled) ++ continue; + +- /* Found a CRTC on this fb, now find encoders */ +- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +- if (encoder->crtc == crtc) { +- struct drm_encoder_helper_funcs *encoder_funcs; ++ /* Found a CRTC on this fb, now find encoders */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ if (encoder->crtc == crtc) { ++ struct drm_encoder_helper_funcs *encoder_funcs; + +- encoder_funcs = encoder->helper_private; +- mutex_lock(&dev->mode_config.mutex); +- encoder_funcs->dpms(encoder, dpms_mode); +- mutex_unlock(&dev->mode_config.mutex); +- } ++ encoder_funcs = encoder->helper_private; ++ encoder_funcs->dpms(encoder, dpms_mode); + } +- mutex_lock(&dev->mode_config.mutex); +- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); +- mutex_unlock(&dev->mode_config.mutex); + } ++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + } ++ 
mutex_unlock(&dev->mode_config.mutex); + } + + int drm_fb_helper_blank(int blank, struct fb_info *info) +@@ -401,50 +413,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) + { + int i; + ++ for (i = 0; i < helper->connector_count; i++) ++ kfree(helper->connector_info[i]); ++ kfree(helper->connector_info); + for (i = 0; i < helper->crtc_count; i++) + kfree(helper->crtc_info[i].mode_set.connectors); + kfree(helper->crtc_info); + } + +-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count) ++int drm_fb_helper_init(struct drm_device *dev, ++ struct drm_fb_helper *fb_helper, ++ int crtc_count, int max_conn_count) + { +- struct drm_device *dev = helper->dev; + struct drm_crtc *crtc; + int ret = 0; + int i; + +- helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); +- if (!helper->crtc_info) ++ fb_helper->dev = dev; ++ ++ INIT_LIST_HEAD(&fb_helper->kernel_fb_list); ++ ++ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); ++ if (!fb_helper->crtc_info) + return -ENOMEM; + +- helper->crtc_count = crtc_count; ++ fb_helper->crtc_count = crtc_count; ++ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL); ++ if (!fb_helper->connector_info) { ++ kfree(fb_helper->crtc_info); ++ return -ENOMEM; ++ } ++ fb_helper->connector_count = 0; + + for (i = 0; i < crtc_count; i++) { +- helper->crtc_info[i].mode_set.connectors = ++ fb_helper->crtc_info[i].mode_set.connectors = + kcalloc(max_conn_count, + sizeof(struct drm_connector *), + GFP_KERNEL); + +- if (!helper->crtc_info[i].mode_set.connectors) { ++ if (!fb_helper->crtc_info[i].mode_set.connectors) { + ret = -ENOMEM; + goto out_free; + } +- helper->crtc_info[i].mode_set.num_connectors = 0; ++ fb_helper->crtc_info[i].mode_set.num_connectors = 0; + } + + i = 0; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- 
helper->crtc_info[i].crtc_id = crtc->base.id; +- helper->crtc_info[i].mode_set.crtc = crtc; ++ fb_helper->crtc_info[i].crtc_id = crtc->base.id; ++ fb_helper->crtc_info[i].mode_set.crtc = crtc; + i++; + } +- helper->conn_limit = max_conn_count; ++ fb_helper->conn_limit = max_conn_count; + return 0; + out_free: +- drm_fb_helper_crtc_free(helper); ++ drm_fb_helper_crtc_free(fb_helper); + return -ENOMEM; + } +-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); ++EXPORT_SYMBOL(drm_fb_helper_init); ++ ++void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) ++{ ++ if (!list_empty(&fb_helper->kernel_fb_list)) { ++ list_del(&fb_helper->kernel_fb_list); ++ if (list_empty(&kernel_fb_helper_list)) { ++ printk(KERN_INFO "drm: unregistered panic notifier\n"); ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &paniced); ++ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); ++ } ++ } ++ ++ drm_fb_helper_crtc_free(fb_helper); ++ ++} ++EXPORT_SYMBOL(drm_fb_helper_fini); + + static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, u16 regno, struct fb_info *info) +@@ -508,20 +551,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, + int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) + { + struct drm_fb_helper *fb_helper = info->par; +- struct drm_device *dev = fb_helper->dev; ++ struct drm_crtc_helper_funcs *crtc_funcs; + u16 *red, *green, *blue, *transp; + struct drm_crtc *crtc; + int i, rc = 0; + int start; + +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; +- for (i = 0; i < fb_helper->crtc_count; i++) { +- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) +- break; +- } +- if (i == fb_helper->crtc_count) +- continue; ++ for (i = 0; i < fb_helper->crtc_count; i++) { ++ crtc = fb_helper->crtc_info[i].mode_set.crtc; ++ crtc_funcs = crtc->helper_private; + + red = cmap->red; + green = cmap->green; +@@ -549,41 +587,6 @@ int 
drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) + } + EXPORT_SYMBOL(drm_fb_helper_setcmap); + +-int drm_fb_helper_setcolreg(unsigned regno, +- unsigned red, +- unsigned green, +- unsigned blue, +- unsigned transp, +- struct fb_info *info) +-{ +- struct drm_fb_helper *fb_helper = info->par; +- struct drm_device *dev = fb_helper->dev; +- struct drm_crtc *crtc; +- int i; +- int ret; +- +- if (regno > 255) +- return 1; +- +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; +- for (i = 0; i < fb_helper->crtc_count; i++) { +- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) +- break; +- } +- if (i == fb_helper->crtc_count) +- continue; +- +- ret = setcolreg(crtc, red, green, blue, regno, info); +- if (ret) +- return ret; +- +- crtc_funcs->load_lut(crtc); +- } +- return 0; +-} +-EXPORT_SYMBOL(drm_fb_helper_setcolreg); +- + int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) + { +@@ -687,23 +690,21 @@ int drm_fb_helper_set_par(struct fb_info *info) + return -EINVAL; + } + +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- +- for (i = 0; i < fb_helper->crtc_count; i++) { +- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) +- break; +- } +- if (i == fb_helper->crtc_count) +- continue; +- +- if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) { +- mutex_lock(&dev->mode_config.mutex); +- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); ++ mutex_lock(&dev->mode_config.mutex); ++ for (i = 0; i < fb_helper->crtc_count; i++) { ++ crtc = fb_helper->crtc_info[i].mode_set.crtc; ++ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); ++ if (ret) { + mutex_unlock(&dev->mode_config.mutex); +- if (ret) +- return ret; ++ return ret; + } + } ++ mutex_unlock(&dev->mode_config.mutex); ++ ++ if (fb_helper->delayed_hotplug) { ++ fb_helper->delayed_hotplug = false; ++ drm_fb_helper_hotplug_event(fb_helper); 
++ } + return 0; + } + EXPORT_SYMBOL(drm_fb_helper_set_par); +@@ -718,14 +719,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + int ret = 0; + int i; + +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- for (i = 0; i < fb_helper->crtc_count; i++) { +- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) +- break; +- } +- +- if (i == fb_helper->crtc_count) +- continue; ++ mutex_lock(&dev->mode_config.mutex); ++ for (i = 0; i < fb_helper->crtc_count; i++) { ++ crtc = fb_helper->crtc_info[i].mode_set.crtc; + + modeset = &fb_helper->crtc_info[i].mode_set; + +@@ -733,209 +729,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + modeset->y = var->yoffset; + + if (modeset->num_connectors) { +- mutex_lock(&dev->mode_config.mutex); + ret = crtc->funcs->set_config(modeset); +- mutex_unlock(&dev->mode_config.mutex); + if (!ret) { + info->var.xoffset = var->xoffset; + info->var.yoffset = var->yoffset; + } + } + } ++ mutex_unlock(&dev->mode_config.mutex); + return ret; + } + EXPORT_SYMBOL(drm_fb_helper_pan_display); + +-int drm_fb_helper_single_fb_probe(struct drm_device *dev, +- int preferred_bpp, +- int (*fb_create)(struct drm_device *dev, +- uint32_t fb_width, +- uint32_t fb_height, +- uint32_t surface_width, +- uint32_t surface_height, +- uint32_t surface_depth, +- uint32_t surface_bpp, +- struct drm_framebuffer **fb_ptr)) ++int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, ++ int preferred_bpp) + { +- struct drm_crtc *crtc; +- struct drm_connector *connector; +- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; +- unsigned int surface_width = 0, surface_height = 0; + int new_fb = 0; + int crtc_count = 0; +- int ret, i, conn_count = 0; ++ int i; + struct fb_info *info; +- struct drm_framebuffer *fb; +- struct drm_mode_set *modeset = NULL; +- struct drm_fb_helper *fb_helper; +- uint32_t surface_depth = 24, surface_bpp = 32; ++ struct drm_fb_helper_surface_size sizes; ++ int gamma_size = 0; 
++ ++ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); ++ sizes.surface_depth = 24; ++ sizes.surface_bpp = 32; ++ sizes.fb_width = (unsigned)-1; ++ sizes.fb_height = (unsigned)-1; + + /* if driver picks 8 or 16 by default use that + for both depth/bpp */ +- if (preferred_bpp != surface_bpp) { +- surface_depth = surface_bpp = preferred_bpp; ++ if (preferred_bpp != sizes.surface_bpp) { ++ sizes.surface_depth = sizes.surface_bpp = preferred_bpp; + } + /* first up get a count of crtcs now in use and new min/maxes width/heights */ +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; +- ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; + struct drm_fb_helper_cmdline_mode *cmdline_mode; + +- if (!fb_help_conn) +- continue; +- +- cmdline_mode = &fb_help_conn->cmdline_mode; ++ cmdline_mode = &fb_helper_conn->cmdline_mode; + + if (cmdline_mode->bpp_specified) { + switch (cmdline_mode->bpp) { + case 8: +- surface_depth = surface_bpp = 8; ++ sizes.surface_depth = sizes.surface_bpp = 8; + break; + case 15: +- surface_depth = 15; +- surface_bpp = 16; ++ sizes.surface_depth = 15; ++ sizes.surface_bpp = 16; + break; + case 16: +- surface_depth = surface_bpp = 16; ++ sizes.surface_depth = sizes.surface_bpp = 16; + break; + case 24: +- surface_depth = surface_bpp = 24; ++ sizes.surface_depth = sizes.surface_bpp = 24; + break; + case 32: +- surface_depth = 24; +- surface_bpp = 32; ++ sizes.surface_depth = 24; ++ sizes.surface_bpp = 32; + break; + } + break; + } + } + +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- if (drm_helper_crtc_in_use(crtc)) { +- if (crtc->desired_mode) { +- if (crtc->desired_mode->hdisplay < fb_width) +- fb_width = crtc->desired_mode->hdisplay; +- +- if (crtc->desired_mode->vdisplay < fb_height) +- fb_height = crtc->desired_mode->vdisplay; 
+- +- if (crtc->desired_mode->hdisplay > surface_width) +- surface_width = crtc->desired_mode->hdisplay; +- +- if (crtc->desired_mode->vdisplay > surface_height) +- surface_height = crtc->desired_mode->vdisplay; +- } ++ crtc_count = 0; ++ for (i = 0; i < fb_helper->crtc_count; i++) { ++ struct drm_display_mode *desired_mode; ++ desired_mode = fb_helper->crtc_info[i].desired_mode; ++ ++ if (desired_mode) { ++ if (gamma_size == 0) ++ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; ++ if (desired_mode->hdisplay < sizes.fb_width) ++ sizes.fb_width = desired_mode->hdisplay; ++ if (desired_mode->vdisplay < sizes.fb_height) ++ sizes.fb_height = desired_mode->vdisplay; ++ if (desired_mode->hdisplay > sizes.surface_width) ++ sizes.surface_width = desired_mode->hdisplay; ++ if (desired_mode->vdisplay > sizes.surface_height) ++ sizes.surface_height = desired_mode->vdisplay; + crtc_count++; + } + } + +- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { ++ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { + /* hmm everyone went away - assume VGA cable just fell out + and will come back later. */ +- return 0; ++ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); ++ sizes.fb_width = sizes.surface_width = 1024; ++ sizes.fb_height = sizes.surface_height = 768; + } + +- /* do we have an fb already? */ +- if (list_empty(&dev->mode_config.fb_kernel_list)) { +- ret = (*fb_create)(dev, fb_width, fb_height, surface_width, +- surface_height, surface_depth, surface_bpp, +- &fb); +- if (ret) +- return -EINVAL; +- new_fb = 1; +- } else { +- fb = list_first_entry(&dev->mode_config.fb_kernel_list, +- struct drm_framebuffer, filp_head); +- +- /* if someone hotplugs something bigger than we have already allocated, we are pwned. +- As really we can't resize an fbdev that is in the wild currently due to fbdev +- not really being designed for the lower layers moving stuff around under it. +- - so in the grand style of things - punt. 
*/ +- if ((fb->width < surface_width) || +- (fb->height < surface_height)) { +- DRM_ERROR("Framebuffer not large enough to scale console onto.\n"); +- return -EINVAL; +- } +- } +- +- info = fb->fbdev; +- fb_helper = info->par; +- +- crtc_count = 0; +- /* okay we need to setup new connector sets in the crtcs */ +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- modeset = &fb_helper->crtc_info[crtc_count].mode_set; +- modeset->fb = fb; +- conn_count = 0; +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- if (connector->encoder) +- if (connector->encoder->crtc == modeset->crtc) { +- modeset->connectors[conn_count] = connector; +- conn_count++; +- if (conn_count > fb_helper->conn_limit) +- BUG(); +- } +- } +- +- for (i = conn_count; i < fb_helper->conn_limit; i++) +- modeset->connectors[i] = NULL; ++ /* push down into drivers */ ++ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); ++ if (new_fb < 0) ++ return new_fb; + +- modeset->crtc = crtc; +- crtc_count++; ++ info = fb_helper->fbdev; + +- modeset->num_connectors = conn_count; +- if (modeset->crtc->desired_mode) { +- if (modeset->mode) +- drm_mode_destroy(dev, modeset->mode); +- modeset->mode = drm_mode_duplicate(dev, +- modeset->crtc->desired_mode); +- } ++ /* set the fb pointer */ ++ for (i = 0; i < fb_helper->crtc_count; i++) { ++ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; + } +- fb_helper->crtc_count = crtc_count; +- fb_helper->fb = fb; + + if (new_fb) { + info->var.pixclock = 0; +- ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0); +- if (ret) +- return ret; + if (register_framebuffer(info) < 0) { +- fb_dealloc_cmap(&info->cmap); + return -EINVAL; + } ++ ++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, ++ info->fix.id); ++ + } else { + drm_fb_helper_set_par(info); + } +- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, +- info->fix.id); + + /* Switch back to kernel console on panic */ + /* multi card 
linked list maybe */ + if (list_empty(&kernel_fb_helper_list)) { +- printk(KERN_INFO "registered panic notifier\n"); ++ printk(KERN_INFO "drm: registered panic notifier\n"); + atomic_notifier_chain_register(&panic_notifier_list, + &paniced); + register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); + } +- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); ++ if (new_fb) ++ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); ++ + return 0; + } + EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); + +-void drm_fb_helper_free(struct drm_fb_helper *helper) +-{ +- list_del(&helper->kernel_fb_list); +- if (list_empty(&kernel_fb_helper_list)) { +- printk(KERN_INFO "unregistered panic notifier\n"); +- atomic_notifier_chain_unregister(&panic_notifier_list, +- &paniced); +- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); +- } +- drm_fb_helper_crtc_free(helper); +- fb_dealloc_cmap(&helper->fb->fbdev->cmap); +-} +-EXPORT_SYMBOL(drm_fb_helper_free); +- + void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth) + { +@@ -954,10 +879,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + } + EXPORT_SYMBOL(drm_fb_helper_fill_fix); + +-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, ++void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, + uint32_t fb_width, uint32_t fb_height) + { +- info->pseudo_palette = fb->pseudo_palette; ++ struct drm_framebuffer *fb = fb_helper->fb; ++ info->pseudo_palette = fb_helper->pseudo_palette; + info->var.xres_virtual = fb->width; + info->var.yres_virtual = fb->height; + info->var.bits_per_pixel = fb->bits_per_pixel; +@@ -1025,3 +951,457 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, + info->var.yres = fb_height; + } + EXPORT_SYMBOL(drm_fb_helper_fill_var); ++ ++static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper, ++ uint32_t maxX, ++ uint32_t maxY) ++{ ++ 
struct drm_connector *connector; ++ int count = 0; ++ int i; ++ ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ connector = fb_helper->connector_info[i]->connector; ++ count += connector->funcs->fill_modes(connector, maxX, maxY); ++ } ++ ++ return count; ++} ++ ++static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) ++{ ++ struct drm_display_mode *mode; ++ ++ list_for_each_entry(mode, &fb_connector->connector->modes, head) { ++ if (drm_mode_width(mode) > width || ++ drm_mode_height(mode) > height) ++ continue; ++ if (mode->type & DRM_MODE_TYPE_PREFERRED) ++ return mode; ++ } ++ return NULL; ++} ++ ++static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) ++{ ++ struct drm_fb_helper_cmdline_mode *cmdline_mode; ++ cmdline_mode = &fb_connector->cmdline_mode; ++ return cmdline_mode->specified; ++} ++ ++static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, ++ int width, int height) ++{ ++ struct drm_fb_helper_cmdline_mode *cmdline_mode; ++ struct drm_display_mode *mode = NULL; ++ ++ cmdline_mode = &fb_helper_conn->cmdline_mode; ++ if (cmdline_mode->specified == false) ++ return mode; ++ ++ /* attempt to find a matching mode in the list of modes ++ * we have gotten so far, if not add a CVT mode that conforms ++ */ ++ if (cmdline_mode->rb || cmdline_mode->margins) ++ goto create_mode; ++ ++ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { ++ /* check width/height */ ++ if (mode->hdisplay != cmdline_mode->xres || ++ mode->vdisplay != cmdline_mode->yres) ++ continue; ++ ++ if (cmdline_mode->refresh_specified) { ++ if (mode->vrefresh != cmdline_mode->refresh) ++ continue; ++ } ++ ++ if (cmdline_mode->interlace) { ++ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) ++ continue; ++ } ++ return mode; ++ } ++ ++create_mode: ++ mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres, ++ 
cmdline_mode->yres, ++ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, ++ cmdline_mode->rb, cmdline_mode->interlace, ++ cmdline_mode->margins); ++ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); ++ list_add(&mode->head, &fb_helper_conn->connector->modes); ++ return mode; ++} ++ ++static bool drm_connector_enabled(struct drm_connector *connector, bool strict) ++{ ++ bool enable; ++ ++ if (strict) { ++ enable = connector->status == connector_status_connected; ++ } else { ++ enable = connector->status != connector_status_disconnected; ++ } ++ return enable; ++} ++ ++static void drm_enable_connectors(struct drm_fb_helper *fb_helper, ++ bool *enabled) ++{ ++ bool any_enabled = false; ++ struct drm_connector *connector; ++ int i = 0; ++ ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ connector = fb_helper->connector_info[i]->connector; ++ enabled[i] = drm_connector_enabled(connector, true); ++ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, ++ enabled[i] ? 
"yes" : "no"); ++ any_enabled |= enabled[i]; ++ } ++ ++ if (any_enabled) ++ return; ++ ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ connector = fb_helper->connector_info[i]->connector; ++ enabled[i] = drm_connector_enabled(connector, false); ++ } ++} ++ ++static bool drm_target_cloned(struct drm_fb_helper *fb_helper, ++ struct drm_display_mode **modes, ++ bool *enabled, int width, int height) ++{ ++ int count, i, j; ++ bool can_clone = false; ++ struct drm_fb_helper_connector *fb_helper_conn; ++ struct drm_display_mode *dmt_mode, *mode; ++ ++ /* only contemplate cloning in the single crtc case */ ++ if (fb_helper->crtc_count > 1) ++ return false; ++ ++ count = 0; ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ if (enabled[i]) ++ count++; ++ } ++ ++ /* only contemplate cloning if more than one connector is enabled */ ++ if (count <= 1) ++ return false; ++ ++ /* check the command line or if nothing common pick 1024x768 */ ++ can_clone = true; ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ if (!enabled[i]) ++ continue; ++ fb_helper_conn = fb_helper->connector_info[i]; ++ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); ++ if (!modes[i]) { ++ can_clone = false; ++ break; ++ } ++ for (j = 0; j < i; j++) { ++ if (!enabled[j]) ++ continue; ++ if (!drm_mode_equal(modes[j], modes[i])) ++ can_clone = false; ++ } ++ } ++ ++ if (can_clone) { ++ DRM_DEBUG_KMS("can clone using command line\n"); ++ return true; ++ } ++ ++ /* try and find a 1024x768 mode on each connector */ ++ can_clone = true; ++ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60); ++ ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ ++ if (!enabled[i]) ++ continue; ++ ++ fb_helper_conn = fb_helper->connector_info[i]; ++ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { ++ if (drm_mode_equal(mode, dmt_mode)) ++ modes[i] = mode; ++ } ++ if (!modes[i]) ++ can_clone = false; ++ } ++ ++ if (can_clone) { ++ DRM_DEBUG_KMS("can clone using 
1024x768\n"); ++ return true; ++ } ++ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); ++ return false; ++} ++ ++static bool drm_target_preferred(struct drm_fb_helper *fb_helper, ++ struct drm_display_mode **modes, ++ bool *enabled, int width, int height) ++{ ++ struct drm_fb_helper_connector *fb_helper_conn; ++ int i; ++ ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ fb_helper_conn = fb_helper->connector_info[i]; ++ ++ if (enabled[i] == false) ++ continue; ++ ++ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", ++ fb_helper_conn->connector->base.id); ++ ++ /* got for command line mode first */ ++ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); ++ if (!modes[i]) { ++ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", ++ fb_helper_conn->connector->base.id); ++ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); ++ } ++ /* No preferred modes, pick one off the list */ ++ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) { ++ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head) ++ break; ++ } ++ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? 
modes[i]->name : ++ "none"); ++ } ++ return true; ++} ++ ++static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, ++ struct drm_fb_helper_crtc **best_crtcs, ++ struct drm_display_mode **modes, ++ int n, int width, int height) ++{ ++ int c, o; ++ struct drm_device *dev = fb_helper->dev; ++ struct drm_connector *connector; ++ struct drm_connector_helper_funcs *connector_funcs; ++ struct drm_encoder *encoder; ++ struct drm_fb_helper_crtc *best_crtc; ++ int my_score, best_score, score; ++ struct drm_fb_helper_crtc **crtcs, *crtc; ++ struct drm_fb_helper_connector *fb_helper_conn; ++ ++ if (n == fb_helper->connector_count) ++ return 0; ++ ++ fb_helper_conn = fb_helper->connector_info[n]; ++ connector = fb_helper_conn->connector; ++ ++ best_crtcs[n] = NULL; ++ best_crtc = NULL; ++ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); ++ if (modes[n] == NULL) ++ return best_score; ++ ++ crtcs = kzalloc(dev->mode_config.num_connector * ++ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); ++ if (!crtcs) ++ return best_score; ++ ++ my_score = 1; ++ if (connector->status == connector_status_connected) ++ my_score++; ++ if (drm_has_cmdline_mode(fb_helper_conn)) ++ my_score++; ++ if (drm_has_preferred_mode(fb_helper_conn, width, height)) ++ my_score++; ++ ++ connector_funcs = connector->helper_private; ++ encoder = connector_funcs->best_encoder(connector); ++ if (!encoder) ++ goto out; ++ ++ /* select a crtc for this connector and then attempt to configure ++ remaining connectors */ ++ for (c = 0; c < fb_helper->crtc_count; c++) { ++ crtc = &fb_helper->crtc_info[c]; ++ ++ if ((encoder->possible_crtcs & (1 << c)) == 0) { ++ continue; ++ } ++ ++ for (o = 0; o < n; o++) ++ if (best_crtcs[o] == crtc) ++ break; ++ ++ if (o < n) { ++ /* ignore cloning unless only a single crtc */ ++ if (fb_helper->crtc_count > 1) ++ continue; ++ ++ if (!drm_mode_equal(modes[o], modes[n])) ++ continue; ++ } ++ ++ crtcs[n] = crtc; ++ memcpy(crtcs, best_crtcs, n * 
sizeof(struct drm_fb_helper_crtc *)); ++ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, ++ width, height); ++ if (score > best_score) { ++ best_crtc = crtc; ++ best_score = score; ++ memcpy(best_crtcs, crtcs, ++ dev->mode_config.num_connector * ++ sizeof(struct drm_fb_helper_crtc *)); ++ } ++ } ++out: ++ kfree(crtcs); ++ return best_score; ++} ++ ++static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) ++{ ++ struct drm_device *dev = fb_helper->dev; ++ struct drm_fb_helper_crtc **crtcs; ++ struct drm_display_mode **modes; ++ struct drm_encoder *encoder; ++ struct drm_mode_set *modeset; ++ bool *enabled; ++ int width, height; ++ int i, ret; ++ ++ DRM_DEBUG_KMS("\n"); ++ ++ width = dev->mode_config.max_width; ++ height = dev->mode_config.max_height; ++ ++ /* clean out all the encoder/crtc combos */ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ encoder->crtc = NULL; ++ } ++ ++ crtcs = kcalloc(dev->mode_config.num_connector, ++ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); ++ modes = kcalloc(dev->mode_config.num_connector, ++ sizeof(struct drm_display_mode *), GFP_KERNEL); ++ enabled = kcalloc(dev->mode_config.num_connector, ++ sizeof(bool), GFP_KERNEL); ++ ++ drm_enable_connectors(fb_helper, enabled); ++ ++ ret = drm_target_cloned(fb_helper, modes, enabled, width, height); ++ if (!ret) { ++ ret = drm_target_preferred(fb_helper, modes, enabled, width, height); ++ if (!ret) ++ DRM_ERROR("Unable to find initial modes\n"); ++ } ++ ++ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); ++ ++ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); ++ ++ /* need to set the modesets up here for use later */ ++ /* fill out the connector<->crtc mappings into the modesets */ ++ for (i = 0; i < fb_helper->crtc_count; i++) { ++ modeset = &fb_helper->crtc_info[i].mode_set; ++ modeset->num_connectors = 0; ++ } ++ ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ struct drm_display_mode *mode = 
modes[i]; ++ struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; ++ modeset = &fb_crtc->mode_set; ++ ++ if (mode && fb_crtc) { ++ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", ++ mode->name, fb_crtc->mode_set.crtc->base.id); ++ fb_crtc->desired_mode = mode; ++ if (modeset->mode) ++ drm_mode_destroy(dev, modeset->mode); ++ modeset->mode = drm_mode_duplicate(dev, ++ fb_crtc->desired_mode); ++ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; ++ } ++ } ++ ++ kfree(crtcs); ++ kfree(modes); ++ kfree(enabled); ++} ++ ++/** ++ * drm_helper_initial_config - setup a sane initial connector configuration ++ * @dev: DRM device ++ * ++ * LOCKING: ++ * Called at init time, must take mode config lock. ++ * ++ * Scan the CRTCs and connectors and try to put together an initial setup. ++ * At the moment, this is a cloned configuration across all heads with ++ * a new framebuffer object as the backing store. ++ * ++ * RETURNS: ++ * Zero if everything went ok, nonzero otherwise. ++ */ ++bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) ++{ ++ struct drm_device *dev = fb_helper->dev; ++ int count = 0; ++ ++ /* disable all the possible outputs/crtcs before entering KMS mode */ ++ drm_helper_disable_unused_functions(fb_helper->dev); ++ ++ drm_fb_helper_parse_command_line(fb_helper); ++ ++ count = drm_fb_helper_probe_connector_modes(fb_helper, ++ dev->mode_config.max_width, ++ dev->mode_config.max_height); ++ /* ++ * we shouldn't end up with no modes here. 
++ */ ++ if (count == 0) { ++ printk(KERN_INFO "No connectors reported connected with modes\n"); ++ } ++ drm_setup_crtcs(fb_helper); ++ ++ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); ++} ++EXPORT_SYMBOL(drm_fb_helper_initial_config); ++ ++bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) ++{ ++ int count = 0; ++ u32 max_width, max_height, bpp_sel; ++ bool bound = false, crtcs_bound = false; ++ struct drm_crtc *crtc; ++ ++ if (!fb_helper->fb) ++ return false; ++ ++ list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { ++ if (crtc->fb) ++ crtcs_bound = true; ++ if (crtc->fb == fb_helper->fb) ++ bound = true; ++ } ++ ++ if (!bound && crtcs_bound) { ++ fb_helper->delayed_hotplug = true; ++ return false; ++ } ++ DRM_DEBUG_KMS("\n"); ++ ++ max_width = fb_helper->fb->width; ++ max_height = fb_helper->fb->height; ++ bpp_sel = fb_helper->fb->bits_per_pixel; ++ ++ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, ++ max_height); ++ drm_setup_crtcs(fb_helper); ++ ++ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); ++} ++EXPORT_SYMBOL(drm_fb_helper_hotplug_event); ++ +diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c +index 9d532d7..e7aace2 100644 +--- a/drivers/gpu/drm/drm_fops.c ++++ b/drivers/gpu/drm/drm_fops.c +@@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp, + + DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); + +- priv = kmalloc(sizeof(*priv), GFP_KERNEL); ++ priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + +- memset(priv, 0, sizeof(*priv)); + filp->private_data = priv; + priv->filp = filp; + priv->uid = current_euid(); +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index aa89d4b..33dad3f 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev) + } + + /** ++ * Initialize an already allocate GEM 
object of the specified size with ++ * shmfs backing store. ++ */ ++int drm_gem_object_init(struct drm_device *dev, ++ struct drm_gem_object *obj, size_t size) ++{ ++ BUG_ON((size & (PAGE_SIZE - 1)) != 0); ++ ++ obj->dev = dev; ++ obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); ++ if (IS_ERR(obj->filp)) ++ return -ENOMEM; ++ ++ kref_init(&obj->refcount); ++ kref_init(&obj->handlecount); ++ obj->size = size; ++ ++ atomic_inc(&dev->object_count); ++ atomic_add(obj->size, &dev->object_memory); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_gem_object_init); ++ ++/** + * Allocate a GEM object of the specified size with shmfs backing store + */ + struct drm_gem_object * +@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) + { + struct drm_gem_object *obj; + +- BUG_ON((size & (PAGE_SIZE - 1)) != 0); +- + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + goto free; + +- obj->dev = dev; +- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); +- if (IS_ERR(obj->filp)) ++ if (drm_gem_object_init(dev, obj, size) != 0) + goto free; + +- kref_init(&obj->refcount); +- kref_init(&obj->handlecount); +- obj->size = size; + if (dev->driver->gem_init_object != NULL && + dev->driver->gem_init_object(obj) != 0) { + goto fput; + } +- atomic_inc(&dev->object_count); +- atomic_add(obj->size, &dev->object_memory); + return obj; + fput: ++ /* Object_init mangles the global counters - readjust them. 
*/ ++ atomic_dec(&dev->object_count); ++ atomic_sub(obj->size, &dev->object_memory); + fput(obj->filp); + free: + kfree(obj); +@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private) + idr_destroy(&file_private->object_idr); + } + +-static void +-drm_gem_object_free_common(struct drm_gem_object *obj) ++void ++drm_gem_object_release(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + fput(obj->filp); + atomic_dec(&dev->object_count); + atomic_sub(obj->size, &dev->object_memory); +- kfree(obj); + } ++EXPORT_SYMBOL(drm_gem_object_release); + + /** + * Called after the last reference to the object has been lost. +@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref) + + if (dev->driver->gem_free_object != NULL) + dev->driver->gem_free_object(obj); +- +- drm_gem_object_free_common(obj); + } + EXPORT_SYMBOL(drm_gem_object_free); + +@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref) + dev->driver->gem_free_object(obj); + mutex_unlock(&dev->struct_mutex); + } +- +- drm_gem_object_free_common(obj); + } + EXPORT_SYMBOL(drm_gem_object_free_unlocked); + +diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c +index 76d6339..f1f473e 100644 +--- a/drivers/gpu/drm/drm_modes.c ++++ b/drivers/gpu/drm/drm_modes.c +@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, + drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; + /* 18/16. 
Find actual vertical frame frequency */ + /* ignore - just set the mode flag for interlaced */ +- if (interlaced) ++ if (interlaced) { + drm_mode->vtotal *= 2; ++ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; ++ } + /* Fill the mode line name */ + drm_mode_set_name(drm_mode); + if (reduced) +@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, + else + drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_NHSYNC); +- if (interlaced) +- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; + +- return drm_mode; ++ return drm_mode; + } + EXPORT_SYMBOL(drm_cvt_mode); + + /** +- * drm_gtf_mode - create the modeline based on GTF algorithm ++ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm + * + * @dev :drm device + * @hdisplay :hdisplay size + * @vdisplay :vdisplay size + * @vrefresh :vrefresh rate. + * @interlaced :whether the interlace is supported +- * @margins :whether the margin is supported ++ * @margins :desired margin size ++ * @GTF_[MCKJ] :extended GTF formula parameters + * + * LOCKING. + * none. + * +- * return the modeline based on GTF algorithm +- * +- * This function is to create the modeline based on the GTF algorithm. +- * Generalized Timing Formula is derived from: +- * GTF Spreadsheet by Andy Morrish (1/5/97) +- * available at http://www.vesa.org ++ * return the modeline based on full GTF algorithm. + * +- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. +- * What I have done is to translate it by using integer calculation. +- * I also refer to the function of fb_get_mode in the file of +- * drivers/video/fbmon.c ++ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them ++ * in here multiplied by two. For a C of 40, pass in 80. 
+ */ +-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, +- int vdisplay, int vrefresh, +- bool interlaced, int margins) +-{ +- /* 1) top/bottom margin size (% of height) - default: 1.8, */ ++struct drm_display_mode * ++drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, ++ int vrefresh, bool interlaced, int margins, ++ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J) ++{ /* 1) top/bottom margin size (% of height) - default: 1.8, */ + #define GTF_MARGIN_PERCENTAGE 18 + /* 2) character cell horizontal granularity (pixels) - default 8 */ + #define GTF_CELL_GRAN 8 +@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, + #define H_SYNC_PERCENT 8 + /* min time of vsync + back porch (microsec) */ + #define MIN_VSYNC_PLUS_BP 550 +- /* blanking formula gradient */ +-#define GTF_M 600 +- /* blanking formula offset */ +-#define GTF_C 40 +- /* blanking formula scaling factor */ +-#define GTF_K 128 +- /* blanking formula scaling factor */ +-#define GTF_J 20 + /* C' and M' are part of the Blanking Duty Cycle computation */ +-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J) +-#define GTF_M_PRIME (GTF_K * GTF_M / 256) ++#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2) ++#define GTF_M_PRIME (GTF_K * GTF_M / 256) + struct drm_display_mode *drm_mode; + unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd; + int top_margin, bottom_margin; +@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, + + drm_mode->clock = pixel_freq; + +- drm_mode_set_name(drm_mode); +- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; +- + if (interlaced) { + drm_mode->vtotal *= 2; + drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; + } + ++ drm_mode_set_name(drm_mode); ++ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40) ++ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; ++ else ++ drm_mode->flags = 
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC; ++ + return drm_mode; + } ++EXPORT_SYMBOL(drm_gtf_mode_complex); ++ ++/** ++ * drm_gtf_mode - create the modeline based on GTF algorithm ++ * ++ * @dev :drm device ++ * @hdisplay :hdisplay size ++ * @vdisplay :vdisplay size ++ * @vrefresh :vrefresh rate. ++ * @interlaced :whether the interlace is supported ++ * @margins :whether the margin is supported ++ * ++ * LOCKING. ++ * none. ++ * ++ * return the modeline based on GTF algorithm ++ * ++ * This function is to create the modeline based on the GTF algorithm. ++ * Generalized Timing Formula is derived from: ++ * GTF Spreadsheet by Andy Morrish (1/5/97) ++ * available at http://www.vesa.org ++ * ++ * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. ++ * What I have done is to translate it by using integer calculation. ++ * I also refer to the function of fb_get_mode in the file of ++ * drivers/video/fbmon.c ++ * ++ * Standard GTF parameters: ++ * M = 600 ++ * C = 40 ++ * K = 128 ++ * J = 20 ++ */ ++struct drm_display_mode * ++drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, ++ bool lace, int margins) ++{ ++ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace, ++ margins, 600, 40 * 2, 128, 20 * 2); ++} + EXPORT_SYMBOL(drm_gtf_mode); ++ + /** + * drm_mode_set_name - set the name on a mode + * @mode: name will be set in this mode +@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode); + */ + void drm_mode_set_name(struct drm_display_mode *mode) + { +- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay, +- mode->vdisplay); ++ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); ++ ++ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s", ++ mode->hdisplay, mode->vdisplay, ++ interlaced ? 
"i" : ""); + } + EXPORT_SYMBOL(drm_mode_set_name); + +diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c +index 25bbd30..3a3a451 100644 +--- a/drivers/gpu/drm/drm_sysfs.c ++++ b/drivers/gpu/drm/drm_sysfs.c +@@ -333,7 +333,7 @@ static struct device_attribute connector_attrs_opt1[] = { + static struct bin_attribute edid_attr = { + .attr.name = "edid", + .attr.mode = 0444, +- .size = 128, ++ .size = 0, + .read = edid_show, + }; + +diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile +index 9929f84..da78f2c 100644 +--- a/drivers/gpu/drm/i915/Makefile ++++ b/drivers/gpu/drm/i915/Makefile +@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ + intel_fb.o \ + intel_tv.o \ + intel_dvo.o \ ++ intel_ringbuffer.o \ + intel_overlay.o \ + dvo_ch7xxx.o \ + dvo_ch7017.o \ +@@ -33,3 +34,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o + i915-$(CONFIG_COMPAT) += i915_ioc32.o + + obj-$(CONFIG_DRM_I915) += i915.o ++ ++CFLAGS_i915_trace_points.o := -I$(src) +diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h +index 288fc50..0d6ff64 100644 +--- a/drivers/gpu/drm/i915/dvo.h ++++ b/drivers/gpu/drm/i915/dvo.h +@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops { + void (*dpms)(struct intel_dvo_device *dvo, int mode); + + /* +- * Saves the output's state for restoration on VT switch. +- */ +- void (*save)(struct intel_dvo_device *dvo); +- +- /* +- * Restore's the output's state at VT switch. +- */ +- void (*restore)(struct intel_dvo_device *dvo); +- +- /* + * Callback for testing a video mode for a given output. 
+ * + * This function should only check for cases where a mode can't +diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c +index 1184c14..14d5980 100644 +--- a/drivers/gpu/drm/i915/dvo_ch7017.c ++++ b/drivers/gpu/drm/i915/dvo_ch7017.c +@@ -159,16 +159,7 @@ + #define CH7017_BANG_LIMIT_CONTROL 0x7f + + struct ch7017_priv { +- uint8_t save_hapi; +- uint8_t save_vali; +- uint8_t save_valo; +- uint8_t save_ailo; +- uint8_t save_lvds_pll_vco; +- uint8_t save_feedback_div; +- uint8_t save_lvds_control_2; +- uint8_t save_outputs_enable; +- uint8_t save_lvds_power_down; +- uint8_t save_power_management; ++ uint8_t dummy; + }; + + static void ch7017_dump_regs(struct intel_dvo_device *dvo); +@@ -401,39 +392,6 @@ do { \ + DUMP(CH7017_LVDS_POWER_DOWN); + } + +-static void ch7017_save(struct intel_dvo_device *dvo) +-{ +- struct ch7017_priv *priv = dvo->dev_priv; +- +- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi); +- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo); +- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo); +- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco); +- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div); +- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2); +- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable); +- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down); +- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management); +-} +- +-static void ch7017_restore(struct intel_dvo_device *dvo) +-{ +- struct ch7017_priv *priv = dvo->dev_priv; +- +- /* Power down before changing mode */ +- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); +- +- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi); +- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo); +- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo); +- 
ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco); +- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div); +- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2); +- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable); +- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down); +- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management); +-} +- + static void ch7017_destroy(struct intel_dvo_device *dvo) + { + struct ch7017_priv *priv = dvo->dev_priv; +@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = { + .mode_set = ch7017_mode_set, + .dpms = ch7017_dpms, + .dump_regs = ch7017_dump_regs, +- .save = ch7017_save, +- .restore = ch7017_restore, + .destroy = ch7017_destroy, + }; +diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c +index d56ff5c..6f1944b 100644 +--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c ++++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c +@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct { + { CH7301_VID, "CH7301" }, + }; + +-struct ch7xxx_reg_state { +- uint8_t regs[CH7xxx_NUM_REGS]; +-}; +- + struct ch7xxx_priv { + bool quiet; +- +- struct ch7xxx_reg_state save_reg; +- struct ch7xxx_reg_state mode_reg; +- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT; +- uint8_t save_TLPF, save_TCT, save_PM, save_IDF; + }; + +-static void ch7xxx_save(struct intel_dvo_device *dvo); +- + static char *ch7xxx_get_id(uint8_t vid) + { + int i; +@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) + + static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) + { +- struct ch7xxx_priv *ch7xxx = dvo->dev_priv; + int i; + + for (i = 0; i < CH7xxx_NUM_REGS; i++) { ++ uint8_t val; + if ((i % 8) == 0 ) + DRM_LOG_KMS("\n %02X: ", i); +- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]); ++ ch7xxx_readb(dvo, i, &val); ++ DRM_LOG_KMS("%02X ", val); + } + } + +-static void ch7xxx_save(struct intel_dvo_device *dvo) +-{ +- 
struct ch7xxx_priv *ch7xxx= dvo->dev_priv; +- +- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL); +- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP); +- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD); +- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT); +- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF); +- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM); +- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF); +-} +- +-static void ch7xxx_restore(struct intel_dvo_device *dvo) +-{ +- struct ch7xxx_priv *ch7xxx = dvo->dev_priv; +- +- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL); +- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP); +- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD); +- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT); +- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF); +- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF); +- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM); +-} +- + static void ch7xxx_destroy(struct intel_dvo_device *dvo) + { + struct ch7xxx_priv *ch7xxx = dvo->dev_priv; +@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = { + .mode_set = ch7xxx_mode_set, + .dpms = ch7xxx_dpms, + .dump_regs = ch7xxx_dump_regs, +- .save = ch7xxx_save, +- .restore = ch7xxx_restore, + .destroy = ch7xxx_destroy, + }; +diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c +index 24169e5..a2ec3f4 100644 +--- a/drivers/gpu/drm/i915/dvo_ivch.c ++++ b/drivers/gpu/drm/i915/dvo_ivch.c +@@ -153,9 +153,6 @@ struct ivch_priv { + bool quiet; + + uint16_t width, height; +- +- uint16_t save_VR01; +- uint16_t save_VR40; + }; + + +@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo) + DRM_LOG_KMS("VR8F: 0x%04x\n", val); + } + +-static void ivch_save(struct intel_dvo_device *dvo) +-{ +- struct ivch_priv *priv = dvo->dev_priv; +- +- ivch_read(dvo, VR01, &priv->save_VR01); +- ivch_read(dvo, VR40, &priv->save_VR40); +-} +- +-static void ivch_restore(struct intel_dvo_device *dvo) +-{ +- struct 
ivch_priv *priv = dvo->dev_priv; +- +- ivch_write(dvo, VR01, priv->save_VR01); +- ivch_write(dvo, VR40, priv->save_VR40); +-} +- + static void ivch_destroy(struct intel_dvo_device *dvo) + { + struct ivch_priv *priv = dvo->dev_priv; +@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo) + struct intel_dvo_dev_ops ivch_ops= { + .init = ivch_init, + .dpms = ivch_dpms, +- .save = ivch_save, +- .restore = ivch_restore, + .mode_valid = ivch_mode_valid, + .mode_set = ivch_mode_set, + .detect = ivch_detect, +diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c +index 0001c13..9b8e676 100644 +--- a/drivers/gpu/drm/i915/dvo_sil164.c ++++ b/drivers/gpu/drm/i915/dvo_sil164.c +@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + #define SIL164_REGC 0x0c + +-struct sil164_save_rec { +- uint8_t reg8; +- uint8_t reg9; +- uint8_t regc; +-}; +- + struct sil164_priv { + //I2CDevRec d; + bool quiet; +- struct sil164_save_rec save_regs; +- struct sil164_save_rec mode_regs; + }; + + #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) +@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo) + DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); + } + +-static void sil164_save(struct intel_dvo_device *dvo) +-{ +- struct sil164_priv *sil= dvo->dev_priv; +- +- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8)) +- return; +- +- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9)) +- return; +- +- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc)) +- return; +- +- return; +-} +- +-static void sil164_restore(struct intel_dvo_device *dvo) +-{ +- struct sil164_priv *sil = dvo->dev_priv; +- +- /* Restore it powered down initially */ +- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1); +- +- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9); +- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc); +- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8); +-} +- + static 
void sil164_destroy(struct intel_dvo_device *dvo) + { + struct sil164_priv *sil = dvo->dev_priv; +@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = { + .mode_set = sil164_mode_set, + .dpms = sil164_dpms, + .dump_regs = sil164_dump_regs, +- .save = sil164_save, +- .restore = sil164_restore, + .destroy = sil164_destroy, + }; +diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c +index c7c391b..66c697b 100644 +--- a/drivers/gpu/drm/i915/dvo_tfp410.c ++++ b/drivers/gpu/drm/i915/dvo_tfp410.c +@@ -86,16 +86,8 @@ + #define TFP410_V_RES_LO 0x3C + #define TFP410_V_RES_HI 0x3D + +-struct tfp410_save_rec { +- uint8_t ctl1; +- uint8_t ctl2; +-}; +- + struct tfp410_priv { + bool quiet; +- +- struct tfp410_save_rec saved_reg; +- struct tfp410_save_rec mode_reg; + }; + + static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) +@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo) + DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); + } + +-static void tfp410_save(struct intel_dvo_device *dvo) +-{ +- struct tfp410_priv *tfp = dvo->dev_priv; +- +- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1)) +- return; +- +- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2)) +- return; +-} +- +-static void tfp410_restore(struct intel_dvo_device *dvo) +-{ +- struct tfp410_priv *tfp = dvo->dev_priv; +- +- /* Restore it powered down initially */ +- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1); +- +- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2); +- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1); +-} +- + static void tfp410_destroy(struct intel_dvo_device *dvo) + { + struct tfp410_priv *tfp = dvo->dev_priv; +@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = { + .mode_set = tfp410_mode_set, + .dpms = tfp410_dpms, + .dump_regs = tfp410_dump_regs, +- .save = tfp410_save, +- .restore = tfp410_restore, + .destroy = tfp410_destroy, + }; +diff --git 
a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index a0b8447..52510ad 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) + case ACTIVE_LIST: + seq_printf(m, "Active:\n"); + lock = &dev_priv->mm.active_list_lock; +- head = &dev_priv->mm.active_list; ++ head = &dev_priv->render_ring.active_list; + break; + case INACTIVE_LIST: + seq_printf(m, "Inactive:\n"); +@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) + spin_lock(lock); + list_for_each_entry(obj_priv, head, list) + { +- struct drm_gem_object *obj = obj_priv->obj; +- + seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s", +- obj, ++ &obj_priv->base, + get_pin_flag(obj_priv), +- obj->size, +- obj->read_domains, obj->write_domain, ++ obj_priv->base.size, ++ obj_priv->base.read_domains, ++ obj_priv->base.write_domain, + obj_priv->last_rendering_seqno, + obj_priv->dirty ? " dirty" : "", + obj_priv->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); + +- if (obj->name) +- seq_printf(m, " (name: %d)", obj->name); ++ if (obj_priv->base.name) ++ seq_printf(m, " (name: %d)", obj_priv->base.name); + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", obj_priv->fence_reg); + if (obj_priv->gtt_space != NULL) +@@ -130,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data) + struct drm_i915_gem_request *gem_request; + + seq_printf(m, "Request:\n"); +- list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) { ++ list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, ++ list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); +@@ -144,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + +- if (dev_priv->hw_status_page != NULL) { ++ if (dev_priv->render_ring.status_page.page_addr != NULL) { + seq_printf(m, "Current sequence: %d\n", +- i915_get_gem_seqno(dev)); ++ i915_get_gem_seqno(dev, &dev_priv->render_ring)); + } else { + seq_printf(m, "Current sequence: hws uninitialized\n"); + } +@@ -196,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data) + } + seq_printf(m, "Interrupts received: %d\n", + atomic_read(&dev_priv->irq_received)); +- if (dev_priv->hw_status_page != NULL) { ++ if (dev_priv->render_ring.status_page.page_addr != NULL) { + seq_printf(m, "Current sequence: %d\n", +- i915_get_gem_seqno(dev)); ++ i915_get_gem_seqno(dev, &dev_priv->render_ring)); + } else { + seq_printf(m, "Current sequence: hws uninitialized\n"); + } +@@ -252,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data) + int i; + volatile u32 *hws; + +- hws = (volatile u32 *)dev_priv->hw_status_page; ++ hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr; + if (hws == NULL) + return 0; + +@@ -288,8 +288,9 @@ static int 
i915_batchbuffer_info(struct seq_file *m, void *data) + + spin_lock(&dev_priv->mm.active_list_lock); + +- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { +- obj = obj_priv->obj; ++ list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, ++ list) { ++ obj = &obj_priv->base; + if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { + ret = i915_gem_object_get_pages(obj, 0); + if (ret) { +@@ -318,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) + u8 *virt; + uint32_t *ptr, off; + +- if (!dev_priv->ring.ring_obj) { ++ if (!dev_priv->render_ring.gem_object) { + seq_printf(m, "No ringbuffer setup\n"); + return 0; + } + +- virt = dev_priv->ring.virtual_start; ++ virt = dev_priv->render_ring.virtual_start; + +- for (off = 0; off < dev_priv->ring.Size; off += 4) { ++ for (off = 0; off < dev_priv->render_ring.size; off += 4) { + ptr = (uint32_t *)(virt + off); + seq_printf(m, "%08x : %08x\n", off, *ptr); + } +@@ -345,7 +346,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) + + seq_printf(m, "RingHead : %08x\n", head); + seq_printf(m, "RingTail : %08x\n", tail); +- seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); ++ seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); + seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? 
ACTHD_I965 : ACTHD)); + + return 0; +@@ -490,11 +491,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u16 rgvswctl = I915_READ16(MEMSWCTL); ++ u16 rgvstat = I915_READ16(MEMSTAT_ILK); + +- seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3); +- seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1); +- seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf, +- rgvswctl & 0x3f); ++ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); ++ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); ++ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> ++ MEMSTAT_VID_SHIFT); ++ seq_printf(m, "Current P-state: %d\n", ++ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + + return 0; + } +@@ -509,7 +513,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) + + for (i = 0; i < 16; i++) { + delayfreq = I915_READ(PXVFREQ_BASE + i * 4); +- seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq); ++ seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, ++ (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); + } + + return 0; +@@ -542,6 +547,8 @@ static int i915_drpc_info(struct seq_file *m, void *unused) + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 rgvmodectl = I915_READ(MEMMODECTL); ++ u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY); ++ u16 crstandvid = I915_READ16(CRSTANDVID); + + seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? + "yes" : "no"); +@@ -556,9 +563,13 @@ static int i915_drpc_info(struct seq_file *m, void *unused) + rgvmodectl & MEMMODE_RCLK_GATE ? 
"yes" : "no"); + seq_printf(m, "Starting frequency: P%d\n", + (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); +- seq_printf(m, "Max frequency: P%d\n", ++ seq_printf(m, "Max P-state: P%d\n", + (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); +- seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); ++ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); ++ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); ++ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); ++ seq_printf(m, "Render standby enabled: %s\n", ++ (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); + + return 0; + } +@@ -567,23 +578,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused) + { + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; +- struct drm_crtc *crtc; + drm_i915_private_t *dev_priv = dev->dev_private; +- bool fbc_enabled = false; + +- if (!dev_priv->display.fbc_enabled) { ++ if (!I915_HAS_FBC(dev)) { + seq_printf(m, "FBC unsupported on this chipset\n"); + return 0; + } + +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +- if (!crtc->enabled) +- continue; +- if (dev_priv->display.fbc_enabled(crtc)) +- fbc_enabled = true; +- } +- +- if (fbc_enabled) { ++ if (intel_fbc_enabled(dev)) { + seq_printf(m, "FBC enabled\n"); + } else { + seq_printf(m, "FBC disabled: "); +@@ -631,6 +633,36 @@ static int i915_sr_status(struct seq_file *m, void *unused) + return 0; + } + ++static int i915_emon_status(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ unsigned long temp, chipset, gfx; ++ ++ temp = i915_mch_val(dev_priv); ++ chipset = i915_chipset_val(dev_priv); ++ gfx = i915_gfx_val(dev_priv); ++ ++ seq_printf(m, "GMCH temp: %ld\n", temp); ++ seq_printf(m, "Chipset power: %ld\n", chipset); ++ 
seq_printf(m, "GFX power: %ld\n", gfx); ++ seq_printf(m, "Total power: %ld\n", chipset + gfx); ++ ++ return 0; ++} ++ ++static int i915_gfxec(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); ++ ++ return 0; ++} ++ + static int + i915_wedged_open(struct inode *inode, + struct file *filp) +@@ -753,6 +785,8 @@ static struct drm_info_list i915_debugfs_list[] = { + {"i915_delayfreq_table", i915_delayfreq_table, 0}, + {"i915_inttoext_table", i915_inttoext_table, 0}, + {"i915_drpc_info", i915_drpc_info, 0}, ++ {"i915_emon_status", i915_emon_status, 0}, ++ {"i915_gfxec", i915_gfxec, 0}, + {"i915_fbc_status", i915_fbc_status, 0}, + {"i915_sr_status", i915_sr_status, 0}, + }; +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index c3cfafc..59a2bf8 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -40,84 +40,6 @@ + #include + #include + +-/* Really want an OS-independent resettable timer. Would like to have +- * this loop run for (eg) 3 sec, but have the timer reset every time +- * the head pointer changes, so that EBUSY only happens if the ring +- * actually stalls for (eg) 3 seconds. +- */ +-int i915_wait_ring(struct drm_device * dev, int n, const char *caller) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- drm_i915_ring_buffer_t *ring = &(dev_priv->ring); +- u32 acthd_reg = IS_I965G(dev) ? 
ACTHD_I965 : ACTHD; +- u32 last_acthd = I915_READ(acthd_reg); +- u32 acthd; +- u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; +- int i; +- +- trace_i915_ring_wait_begin (dev); +- +- for (i = 0; i < 100000; i++) { +- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; +- acthd = I915_READ(acthd_reg); +- ring->space = ring->head - (ring->tail + 8); +- if (ring->space < 0) +- ring->space += ring->Size; +- if (ring->space >= n) { +- trace_i915_ring_wait_end (dev); +- return 0; +- } +- +- if (dev->primary->master) { +- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; +- if (master_priv->sarea_priv) +- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; +- } +- +- +- if (ring->head != last_head) +- i = 0; +- if (acthd != last_acthd) +- i = 0; +- +- last_head = ring->head; +- last_acthd = acthd; +- msleep_interruptible(10); +- +- } +- +- trace_i915_ring_wait_end (dev); +- return -EBUSY; +-} +- +-/* As a ringbuffer is only allowed to wrap between instructions, fill +- * the tail with NOOPs. +- */ +-int i915_wrap_ring(struct drm_device *dev) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- volatile unsigned int *virt; +- int rem; +- +- rem = dev_priv->ring.Size - dev_priv->ring.tail; +- if (dev_priv->ring.space < rem) { +- int ret = i915_wait_ring(dev, rem, __func__); +- if (ret) +- return ret; +- } +- dev_priv->ring.space -= rem; +- +- virt = (unsigned int *) +- (dev_priv->ring.virtual_start + dev_priv->ring.tail); +- rem /= 4; +- while (rem--) +- *virt++ = MI_NOOP; +- +- dev_priv->ring.tail = 0; +- +- return 0; +-} +- + /** + * Sets up the hardware status page for devices that need a physical address + * in the register. 
+@@ -133,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev) + DRM_ERROR("Can not allocate hardware status page\n"); + return -ENOMEM; + } +- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; ++ dev_priv->render_ring.status_page.page_addr ++ = dev_priv->status_page_dmah->vaddr; + dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; + +- memset(dev_priv->hw_status_page, 0, PAGE_SIZE); ++ memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); + + if (IS_I965G(dev)) + dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & +@@ -159,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev) + dev_priv->status_page_dmah = NULL; + } + +- if (dev_priv->status_gfx_addr) { +- dev_priv->status_gfx_addr = 0; ++ if (dev_priv->render_ring.status_page.gfx_addr) { ++ dev_priv->render_ring.status_page.gfx_addr = 0; + drm_core_ioremapfree(&dev_priv->hws_map, dev); + } + +@@ -172,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv; +- drm_i915_ring_buffer_t *ring = &(dev_priv->ring); ++ struct intel_ring_buffer *ring = &dev_priv->render_ring; + + /* + * We should never lose context on the ring with modesetting +@@ -185,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev) + ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) +- ring->space += ring->Size; ++ ring->space += ring->size; + + if (!dev->primary->master) + return; +@@ -205,12 +128,9 @@ static int i915_dma_cleanup(struct drm_device * dev) + if (dev->irq_enabled) + drm_irq_uninstall(dev); + +- if (dev_priv->ring.virtual_start) { +- drm_core_ioremapfree(&dev_priv->ring.map, dev); +- dev_priv->ring.virtual_start = NULL; +- dev_priv->ring.map.handle = NULL; +- dev_priv->ring.map.size = 0; +- } ++ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); ++ if (HAS_BSD(dev)) ++ 
intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + + /* Clear the HWS virtual address at teardown */ + if (I915_NEED_GFX_HWS(dev)) +@@ -233,24 +153,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) + } + + if (init->ring_size != 0) { +- if (dev_priv->ring.ring_obj != NULL) { ++ if (dev_priv->render_ring.gem_object != NULL) { + i915_dma_cleanup(dev); + DRM_ERROR("Client tried to initialize ringbuffer in " + "GEM mode\n"); + return -EINVAL; + } + +- dev_priv->ring.Size = init->ring_size; ++ dev_priv->render_ring.size = init->ring_size; + +- dev_priv->ring.map.offset = init->ring_start; +- dev_priv->ring.map.size = init->ring_size; +- dev_priv->ring.map.type = 0; +- dev_priv->ring.map.flags = 0; +- dev_priv->ring.map.mtrr = 0; ++ dev_priv->render_ring.map.offset = init->ring_start; ++ dev_priv->render_ring.map.size = init->ring_size; ++ dev_priv->render_ring.map.type = 0; ++ dev_priv->render_ring.map.flags = 0; ++ dev_priv->render_ring.map.mtrr = 0; + +- drm_core_ioremap_wc(&dev_priv->ring.map, dev); ++ drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); + +- if (dev_priv->ring.map.handle == NULL) { ++ if (dev_priv->render_ring.map.handle == NULL) { + i915_dma_cleanup(dev); + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); +@@ -258,7 +178,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) + } + } + +- dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ++ dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; + + dev_priv->cpp = init->cpp; + dev_priv->back_offset = init->back_offset; +@@ -278,26 +198,29 @@ static int i915_dma_resume(struct drm_device * dev) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + ++ struct intel_ring_buffer *ring; + DRM_DEBUG_DRIVER("%s\n", __func__); + +- if (dev_priv->ring.map.handle == NULL) { ++ ring = &dev_priv->render_ring; ++ ++ if (ring->map.handle == NULL) { + DRM_ERROR("can not ioremap 
virtual address for" + " ring buffer\n"); + return -ENOMEM; + } + + /* Program Hardware Status Page */ +- if (!dev_priv->hw_status_page) { ++ if (!ring->status_page.page_addr) { + DRM_ERROR("Can not find hardware status page\n"); + return -EINVAL; + } + DRM_DEBUG_DRIVER("hw status page @ %p\n", +- dev_priv->hw_status_page); +- +- if (dev_priv->status_gfx_addr != 0) +- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); ++ ring->status_page.page_addr); ++ if (ring->status_page.gfx_addr != 0) ++ ring->setup_status_page(dev, ring); + else + I915_WRITE(HWS_PGA, dev_priv->dma_status_page); ++ + DRM_DEBUG_DRIVER("Enabled hardware status page\n"); + + return 0; +@@ -407,9 +330,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) + { + drm_i915_private_t *dev_priv = dev->dev_private; + int i; +- RING_LOCALS; + +- if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) ++ if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) + return -EINVAL; + + BEGIN_LP_RING((dwords+1)&~1); +@@ -442,9 +364,7 @@ i915_emit_box(struct drm_device *dev, + struct drm_clip_rect *boxes, + int i, int DR1, int DR4) + { +- drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_clip_rect box = boxes[i]; +- RING_LOCALS; + + if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { + DRM_ERROR("Bad box %d,%d..%d,%d\n", +@@ -481,7 +401,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; +- RING_LOCALS; + + dev_priv->counter++; + if (dev_priv->counter > 0x7FFFFFFFUL) +@@ -535,10 +454,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, + drm_i915_batchbuffer_t * batch, + struct drm_clip_rect *cliprects) + { +- drm_i915_private_t *dev_priv = dev->dev_private; + int nbox = batch->num_cliprects; + int i = 0, count; +- RING_LOCALS; + + if ((batch->start | batch->used) & 0x7) { + 
DRM_ERROR("alignment"); +@@ -587,7 +504,6 @@ static int i915_dispatch_flip(struct drm_device * dev) + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv = + dev->primary->master->driver_priv; +- RING_LOCALS; + + if (!master_priv->sarea_priv) + return -EINVAL; +@@ -640,7 +556,8 @@ static int i915_quiescent(struct drm_device * dev) + drm_i915_private_t *dev_priv = dev->dev_private; + + i915_kernel_lost_context(dev); +- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); ++ return intel_wait_ring_buffer(dev, &dev_priv->render_ring, ++ dev_priv->render_ring.size - 8); + } + + static int i915_flush_ioctl(struct drm_device *dev, void *data, +@@ -827,6 +744,9 @@ static int i915_getparam(struct drm_device *dev, void *data, + /* depends on GEM */ + value = dev_priv->has_gem; + break; ++ case I915_PARAM_HAS_BSD: ++ value = HAS_BSD(dev); ++ break; + default: + DRM_DEBUG_DRIVER("Unknown parameter %d\n", + param->param); +@@ -882,6 +802,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, + { + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_hws_addr_t *hws = data; ++ struct intel_ring_buffer *ring = &dev_priv->render_ring; + + if (!I915_NEED_GFX_HWS(dev)) + return -EINVAL; +@@ -898,7 +819,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, + + DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); + +- dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); ++ ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); + + dev_priv->hws_map.offset = dev->agp->base + hws->addr; + dev_priv->hws_map.size = 4*1024; +@@ -909,19 +830,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data, + drm_core_ioremap_wc(&dev_priv->hws_map, dev); + if (dev_priv->hws_map.handle == NULL) { + i915_dma_cleanup(dev); +- dev_priv->status_gfx_addr = 0; ++ ring->status_page.gfx_addr = 0; + DRM_ERROR("can not ioremap virtual address for" + " G33 hw status page\n"); + 
return -ENOMEM; + } +- dev_priv->hw_status_page = dev_priv->hws_map.handle; ++ ring->status_page.page_addr = dev_priv->hws_map.handle; ++ memset(ring->status_page.page_addr, 0, PAGE_SIZE); ++ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); + +- memset(dev_priv->hw_status_page, 0, PAGE_SIZE); +- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); + DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", +- dev_priv->status_gfx_addr); ++ ring->status_page.gfx_addr); + DRM_DEBUG_DRIVER("load hws at %p\n", +- dev_priv->hw_status_page); ++ ring->status_page.page_addr); + return 0; + } + +@@ -1357,13 +1278,12 @@ static void i915_setup_compression(struct drm_device *dev, int size) + + dev_priv->cfb_size = size; + ++ intel_disable_fbc(dev); + dev_priv->compressed_fb = compressed_fb; + + if (IS_GM45(dev)) { +- g4x_disable_fbc(dev); + I915_WRITE(DPFC_CB_BASE, compressed_fb->start); + } else { +- i8xx_disable_fbc(dev); + I915_WRITE(FBC_CFB_BASE, cfb_base); + I915_WRITE(FBC_LL_BASE, ll_base); + dev_priv->compressed_llb = compressed_llb; +@@ -1400,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ + struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { +- printk(KERN_INFO "i915: switched off\n"); ++ printk(KERN_INFO "i915: switched on\n"); + /* i915 resume handler doesn't set to D0 */ + pci_set_power_state(dev->pdev, PCI_D0); + i915_resume(dev); ++ drm_kms_helper_poll_enable(dev); + } else { + printk(KERN_ERR "i915: switched off\n"); ++ drm_kms_helper_poll_disable(dev); + i915_suspend(dev, pmm); + } + } +@@ -1480,19 +1402,19 @@ static int i915_load_modeset_init(struct drm_device *dev, + /* if we have > 1 VGA cards, then disable the radeon VGA resources */ + ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); + if (ret) +- goto destroy_ringbuffer; ++ goto cleanup_ringbuffer; + + ret = vga_switcheroo_register_client(dev->pdev, + 
i915_switcheroo_set_state, + i915_switcheroo_can_switch); + if (ret) +- goto destroy_ringbuffer; ++ goto cleanup_vga_client; + + intel_modeset_init(dev); + + ret = drm_irq_install(dev); + if (ret) +- goto destroy_ringbuffer; ++ goto cleanup_vga_switcheroo; + + /* Always safe in the mode setting case. */ + /* FIXME: do pre/post-mode set stuff in core KMS code */ +@@ -1504,11 +1426,20 @@ static int i915_load_modeset_init(struct drm_device *dev, + + I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); + +- drm_helper_initial_config(dev); ++ ret = intel_fbdev_init(dev); ++ if (ret) ++ goto cleanup_irq; + ++ drm_kms_helper_poll_init(dev); + return 0; + +-destroy_ringbuffer: ++cleanup_irq: ++ drm_irq_uninstall(dev); ++cleanup_vga_switcheroo: ++ vga_switcheroo_unregister_client(dev->pdev); ++cleanup_vga_client: ++ vga_client_register(dev->pdev, NULL, NULL, NULL); ++cleanup_ringbuffer: + mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); + mutex_unlock(&dev->struct_mutex); +@@ -1540,14 +1471,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) + master->driver_priv = NULL; + } + +-static void i915_get_mem_freq(struct drm_device *dev) ++static void i915_pineview_get_mem_freq(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + u32 tmp; + +- if (!IS_PINEVIEW(dev)) +- return; +- + tmp = I915_READ(CLKCFG); + + switch (tmp & CLKCFG_FSB_MASK) { +@@ -1576,8 +1504,525 @@ static void i915_get_mem_freq(struct drm_device *dev) + dev_priv->mem_freq = 800; + break; + } ++ ++ /* detect pineview DDR3 setting */ ++ tmp = I915_READ(CSHRDDR3CTL); ++ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 
1 : 0; ++} ++ ++static void i915_ironlake_get_mem_freq(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u16 ddrpll, csipll; ++ ++ ddrpll = I915_READ16(DDRMPLL1); ++ csipll = I915_READ16(CSIPLL0); ++ ++ switch (ddrpll & 0xff) { ++ case 0xc: ++ dev_priv->mem_freq = 800; ++ break; ++ case 0x10: ++ dev_priv->mem_freq = 1066; ++ break; ++ case 0x14: ++ dev_priv->mem_freq = 1333; ++ break; ++ case 0x18: ++ dev_priv->mem_freq = 1600; ++ break; ++ default: ++ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", ++ ddrpll & 0xff); ++ dev_priv->mem_freq = 0; ++ break; ++ } ++ ++ dev_priv->r_t = dev_priv->mem_freq; ++ ++ switch (csipll & 0x3ff) { ++ case 0x00c: ++ dev_priv->fsb_freq = 3200; ++ break; ++ case 0x00e: ++ dev_priv->fsb_freq = 3733; ++ break; ++ case 0x010: ++ dev_priv->fsb_freq = 4266; ++ break; ++ case 0x012: ++ dev_priv->fsb_freq = 4800; ++ break; ++ case 0x014: ++ dev_priv->fsb_freq = 5333; ++ break; ++ case 0x016: ++ dev_priv->fsb_freq = 5866; ++ break; ++ case 0x018: ++ dev_priv->fsb_freq = 6400; ++ break; ++ default: ++ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", ++ csipll & 0x3ff); ++ dev_priv->fsb_freq = 0; ++ break; ++ } ++ ++ if (dev_priv->fsb_freq == 3200) { ++ dev_priv->c_m = 0; ++ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { ++ dev_priv->c_m = 1; ++ } else { ++ dev_priv->c_m = 2; ++ } ++} ++ ++struct v_table { ++ u8 vid; ++ unsigned long vd; /* in .1 mil */ ++ unsigned long vm; /* in .1 mil */ ++ u8 pvid; ++}; ++ ++static struct v_table v_table[] = { ++ { 0, 16125, 15000, 0x7f, }, ++ { 1, 16000, 14875, 0x7e, }, ++ { 2, 15875, 14750, 0x7d, }, ++ { 3, 15750, 14625, 0x7c, }, ++ { 4, 15625, 14500, 0x7b, }, ++ { 5, 15500, 14375, 0x7a, }, ++ { 6, 15375, 14250, 0x79, }, ++ { 7, 15250, 14125, 0x78, }, ++ { 8, 15125, 14000, 0x77, }, ++ { 9, 15000, 13875, 0x76, }, ++ { 10, 14875, 13750, 0x75, }, ++ { 11, 14750, 13625, 0x74, }, ++ { 12, 14625, 13500, 0x73, }, ++ { 13, 14500, 13375, 0x72, }, ++ { 
14, 14375, 13250, 0x71, }, ++ { 15, 14250, 13125, 0x70, }, ++ { 16, 14125, 13000, 0x6f, }, ++ { 17, 14000, 12875, 0x6e, }, ++ { 18, 13875, 12750, 0x6d, }, ++ { 19, 13750, 12625, 0x6c, }, ++ { 20, 13625, 12500, 0x6b, }, ++ { 21, 13500, 12375, 0x6a, }, ++ { 22, 13375, 12250, 0x69, }, ++ { 23, 13250, 12125, 0x68, }, ++ { 24, 13125, 12000, 0x67, }, ++ { 25, 13000, 11875, 0x66, }, ++ { 26, 12875, 11750, 0x65, }, ++ { 27, 12750, 11625, 0x64, }, ++ { 28, 12625, 11500, 0x63, }, ++ { 29, 12500, 11375, 0x62, }, ++ { 30, 12375, 11250, 0x61, }, ++ { 31, 12250, 11125, 0x60, }, ++ { 32, 12125, 11000, 0x5f, }, ++ { 33, 12000, 10875, 0x5e, }, ++ { 34, 11875, 10750, 0x5d, }, ++ { 35, 11750, 10625, 0x5c, }, ++ { 36, 11625, 10500, 0x5b, }, ++ { 37, 11500, 10375, 0x5a, }, ++ { 38, 11375, 10250, 0x59, }, ++ { 39, 11250, 10125, 0x58, }, ++ { 40, 11125, 10000, 0x57, }, ++ { 41, 11000, 9875, 0x56, }, ++ { 42, 10875, 9750, 0x55, }, ++ { 43, 10750, 9625, 0x54, }, ++ { 44, 10625, 9500, 0x53, }, ++ { 45, 10500, 9375, 0x52, }, ++ { 46, 10375, 9250, 0x51, }, ++ { 47, 10250, 9125, 0x50, }, ++ { 48, 10125, 9000, 0x4f, }, ++ { 49, 10000, 8875, 0x4e, }, ++ { 50, 9875, 8750, 0x4d, }, ++ { 51, 9750, 8625, 0x4c, }, ++ { 52, 9625, 8500, 0x4b, }, ++ { 53, 9500, 8375, 0x4a, }, ++ { 54, 9375, 8250, 0x49, }, ++ { 55, 9250, 8125, 0x48, }, ++ { 56, 9125, 8000, 0x47, }, ++ { 57, 9000, 7875, 0x46, }, ++ { 58, 8875, 7750, 0x45, }, ++ { 59, 8750, 7625, 0x44, }, ++ { 60, 8625, 7500, 0x43, }, ++ { 61, 8500, 7375, 0x42, }, ++ { 62, 8375, 7250, 0x41, }, ++ { 63, 8250, 7125, 0x40, }, ++ { 64, 8125, 7000, 0x3f, }, ++ { 65, 8000, 6875, 0x3e, }, ++ { 66, 7875, 6750, 0x3d, }, ++ { 67, 7750, 6625, 0x3c, }, ++ { 68, 7625, 6500, 0x3b, }, ++ { 69, 7500, 6375, 0x3a, }, ++ { 70, 7375, 6250, 0x39, }, ++ { 71, 7250, 6125, 0x38, }, ++ { 72, 7125, 6000, 0x37, }, ++ { 73, 7000, 5875, 0x36, }, ++ { 74, 6875, 5750, 0x35, }, ++ { 75, 6750, 5625, 0x34, }, ++ { 76, 6625, 5500, 0x33, }, ++ { 77, 6500, 5375, 0x32, }, ++ { 78, 6375, 5250, 
0x31, }, ++ { 79, 6250, 5125, 0x30, }, ++ { 80, 6125, 5000, 0x2f, }, ++ { 81, 6000, 4875, 0x2e, }, ++ { 82, 5875, 4750, 0x2d, }, ++ { 83, 5750, 4625, 0x2c, }, ++ { 84, 5625, 4500, 0x2b, }, ++ { 85, 5500, 4375, 0x2a, }, ++ { 86, 5375, 4250, 0x29, }, ++ { 87, 5250, 4125, 0x28, }, ++ { 88, 5125, 4000, 0x27, }, ++ { 89, 5000, 3875, 0x26, }, ++ { 90, 4875, 3750, 0x25, }, ++ { 91, 4750, 3625, 0x24, }, ++ { 92, 4625, 3500, 0x23, }, ++ { 93, 4500, 3375, 0x22, }, ++ { 94, 4375, 3250, 0x21, }, ++ { 95, 4250, 3125, 0x20, }, ++ { 96, 4125, 3000, 0x1f, }, ++ { 97, 4125, 3000, 0x1e, }, ++ { 98, 4125, 3000, 0x1d, }, ++ { 99, 4125, 3000, 0x1c, }, ++ { 100, 4125, 3000, 0x1b, }, ++ { 101, 4125, 3000, 0x1a, }, ++ { 102, 4125, 3000, 0x19, }, ++ { 103, 4125, 3000, 0x18, }, ++ { 104, 4125, 3000, 0x17, }, ++ { 105, 4125, 3000, 0x16, }, ++ { 106, 4125, 3000, 0x15, }, ++ { 107, 4125, 3000, 0x14, }, ++ { 108, 4125, 3000, 0x13, }, ++ { 109, 4125, 3000, 0x12, }, ++ { 110, 4125, 3000, 0x11, }, ++ { 111, 4125, 3000, 0x10, }, ++ { 112, 4125, 3000, 0x0f, }, ++ { 113, 4125, 3000, 0x0e, }, ++ { 114, 4125, 3000, 0x0d, }, ++ { 115, 4125, 3000, 0x0c, }, ++ { 116, 4125, 3000, 0x0b, }, ++ { 117, 4125, 3000, 0x0a, }, ++ { 118, 4125, 3000, 0x09, }, ++ { 119, 4125, 3000, 0x08, }, ++ { 120, 1125, 0, 0x07, }, ++ { 121, 1000, 0, 0x06, }, ++ { 122, 875, 0, 0x05, }, ++ { 123, 750, 0, 0x04, }, ++ { 124, 625, 0, 0x03, }, ++ { 125, 500, 0, 0x02, }, ++ { 126, 375, 0, 0x01, }, ++ { 127, 0, 0, 0x00, }, ++}; ++ ++struct cparams { ++ int i; ++ int t; ++ int m; ++ int c; ++}; ++ ++static struct cparams cparams[] = { ++ { 1, 1333, 301, 28664 }, ++ { 1, 1066, 294, 24460 }, ++ { 1, 800, 294, 25192 }, ++ { 0, 1333, 276, 27605 }, ++ { 0, 1066, 276, 27605 }, ++ { 0, 800, 231, 23784 }, ++}; ++ ++unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) ++{ ++ u64 total_count, diff, ret; ++ u32 count1, count2, count3, m = 0, c = 0; ++ unsigned long now = jiffies_to_msecs(jiffies), diff1; ++ int i; ++ ++ diff1 = now - 
dev_priv->last_time1; ++ ++ count1 = I915_READ(DMIEC); ++ count2 = I915_READ(DDREC); ++ count3 = I915_READ(CSIEC); ++ ++ total_count = count1 + count2 + count3; ++ ++ /* FIXME: handle per-counter overflow */ ++ if (total_count < dev_priv->last_count1) { ++ diff = ~0UL - dev_priv->last_count1; ++ diff += total_count; ++ } else { ++ diff = total_count - dev_priv->last_count1; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(cparams); i++) { ++ if (cparams[i].i == dev_priv->c_m && ++ cparams[i].t == dev_priv->r_t) { ++ m = cparams[i].m; ++ c = cparams[i].c; ++ break; ++ } ++ } ++ ++ div_u64(diff, diff1); ++ ret = ((m * diff) + c); ++ div_u64(ret, 10); ++ ++ dev_priv->last_count1 = total_count; ++ dev_priv->last_time1 = now; ++ ++ return ret; ++} ++ ++unsigned long i915_mch_val(struct drm_i915_private *dev_priv) ++{ ++ unsigned long m, x, b; ++ u32 tsfs; ++ ++ tsfs = I915_READ(TSFS); ++ ++ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); ++ x = I915_READ8(TR1); ++ ++ b = tsfs & TSFS_INTR_MASK; ++ ++ return ((m * x) / 127) - b; + } + ++static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) ++{ ++ unsigned long val = 0; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(v_table); i++) { ++ if (v_table[i].pvid == pxvid) { ++ if (IS_MOBILE(dev_priv->dev)) ++ val = v_table[i].vm; ++ else ++ val = v_table[i].vd; ++ } ++ } ++ ++ return val; ++} ++ ++void i915_update_gfx_val(struct drm_i915_private *dev_priv) ++{ ++ struct timespec now, diff1; ++ u64 diff; ++ unsigned long diffms; ++ u32 count; ++ ++ getrawmonotonic(&now); ++ diff1 = timespec_sub(now, dev_priv->last_time2); ++ ++ /* Don't divide by 0 */ ++ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; ++ if (!diffms) ++ return; ++ ++ count = I915_READ(GFXEC); ++ ++ if (count < dev_priv->last_count2) { ++ diff = ~0UL - dev_priv->last_count2; ++ diff += count; ++ } else { ++ diff = count - dev_priv->last_count2; ++ } ++ ++ dev_priv->last_count2 = count; ++ dev_priv->last_time2 = now; ++ ++ /* More magic 
constants... */ ++ diff = diff * 1181; ++ div_u64(diff, diffms * 10); ++ dev_priv->gfx_power = diff; ++} ++ ++unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) ++{ ++ unsigned long t, corr, state1, corr2, state2; ++ u32 pxvid, ext_v; ++ ++ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); ++ pxvid = (pxvid >> 24) & 0x7f; ++ ext_v = pvid_to_extvid(dev_priv, pxvid); ++ ++ state1 = ext_v; ++ ++ t = i915_mch_val(dev_priv); ++ ++ /* Revel in the empirically derived constants */ ++ ++ /* Correction factor in 1/100000 units */ ++ if (t > 80) ++ corr = ((t * 2349) + 135940); ++ else if (t >= 50) ++ corr = ((t * 964) + 29317); ++ else /* < 50 */ ++ corr = ((t * 301) + 1004); ++ ++ corr = corr * ((150142 * state1) / 10000 - 78642); ++ corr /= 100000; ++ corr2 = (corr * dev_priv->corr); ++ ++ state2 = (corr2 * state1) / 10000; ++ state2 /= 100; /* convert to mW */ ++ ++ i915_update_gfx_val(dev_priv); ++ ++ return dev_priv->gfx_power + state2; ++} ++ ++/* Global for IPS driver to get at the current i915 device */ ++static struct drm_i915_private *i915_mch_dev; ++/* ++ * Lock protecting IPS related data structures ++ * - i915_mch_dev ++ * - dev_priv->max_delay ++ * - dev_priv->min_delay ++ * - dev_priv->fmax ++ * - dev_priv->gpu_busy ++ */ ++DEFINE_SPINLOCK(mchdev_lock); ++ ++/** ++ * i915_read_mch_val - return value for IPS use ++ * ++ * Calculate and return a value for the IPS driver to use when deciding whether ++ * we have thermal and power headroom to increase CPU or GPU power budget. 
++ */ ++unsigned long i915_read_mch_val(void) ++{ ++ struct drm_i915_private *dev_priv; ++ unsigned long chipset_val, graphics_val, ret = 0; ++ ++ spin_lock(&mchdev_lock); ++ if (!i915_mch_dev) ++ goto out_unlock; ++ dev_priv = i915_mch_dev; ++ ++ chipset_val = i915_chipset_val(dev_priv); ++ graphics_val = i915_gfx_val(dev_priv); ++ ++ ret = chipset_val + graphics_val; ++ ++out_unlock: ++ spin_unlock(&mchdev_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(i915_read_mch_val); ++ ++/** ++ * i915_gpu_raise - raise GPU frequency limit ++ * ++ * Raise the limit; IPS indicates we have thermal headroom. ++ */ ++bool i915_gpu_raise(void) ++{ ++ struct drm_i915_private *dev_priv; ++ bool ret = true; ++ ++ spin_lock(&mchdev_lock); ++ if (!i915_mch_dev) { ++ ret = false; ++ goto out_unlock; ++ } ++ dev_priv = i915_mch_dev; ++ ++ if (dev_priv->max_delay > dev_priv->fmax) ++ dev_priv->max_delay--; ++ ++out_unlock: ++ spin_unlock(&mchdev_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(i915_gpu_raise); ++ ++/** ++ * i915_gpu_lower - lower GPU frequency limit ++ * ++ * IPS indicates we're close to a thermal limit, so throttle back the GPU ++ * frequency maximum. ++ */ ++bool i915_gpu_lower(void) ++{ ++ struct drm_i915_private *dev_priv; ++ bool ret = true; ++ ++ spin_lock(&mchdev_lock); ++ if (!i915_mch_dev) { ++ ret = false; ++ goto out_unlock; ++ } ++ dev_priv = i915_mch_dev; ++ ++ if (dev_priv->max_delay < dev_priv->min_delay) ++ dev_priv->max_delay++; ++ ++out_unlock: ++ spin_unlock(&mchdev_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(i915_gpu_lower); ++ ++/** ++ * i915_gpu_busy - indicate GPU business to IPS ++ * ++ * Tell the IPS driver whether or not the GPU is busy. 
++ */ ++bool i915_gpu_busy(void) ++{ ++ struct drm_i915_private *dev_priv; ++ bool ret = false; ++ ++ spin_lock(&mchdev_lock); ++ if (!i915_mch_dev) ++ goto out_unlock; ++ dev_priv = i915_mch_dev; ++ ++ ret = dev_priv->busy; ++ ++out_unlock: ++ spin_unlock(&mchdev_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(i915_gpu_busy); ++ ++/** ++ * i915_gpu_turbo_disable - disable graphics turbo ++ * ++ * Disable graphics turbo by resetting the max frequency and setting the ++ * current frequency to the default. ++ */ ++bool i915_gpu_turbo_disable(void) ++{ ++ struct drm_i915_private *dev_priv; ++ bool ret = true; ++ ++ spin_lock(&mchdev_lock); ++ if (!i915_mch_dev) { ++ ret = false; ++ goto out_unlock; ++ } ++ dev_priv = i915_mch_dev; ++ ++ dev_priv->max_delay = dev_priv->fstart; ++ ++ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) ++ ret = false; ++ ++out_unlock: ++ spin_unlock(&mchdev_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); ++ + /** + * i915_driver_load - setup chip and create an initial config + * @dev: DRM device +@@ -1591,11 +2036,10 @@ static void i915_get_mem_freq(struct drm_device *dev) + */ + int i915_driver_load(struct drm_device *dev, unsigned long flags) + { +- struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_i915_private *dev_priv; + resource_size_t base, size; + int ret = 0, mmio_bar; + uint32_t agp_size, prealloc_size, prealloc_start; +- + /* i915 has 4 more counters */ + dev->counters += 4; + dev->types[6] = _DRM_STAT_IRQ; +@@ -1673,6 +2117,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + dev_priv->has_gem = 0; + } + ++ if (dev_priv->has_gem == 0 && ++ drm_core_check_feature(dev, DRIVER_MODESET)) { ++ DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); ++ ret = -ENODEV; ++ goto out_iomapfree; ++ } ++ + dev->driver->get_vblank_counter = i915_get_vblank_counter; + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ + if (IS_G4X(dev) || 
IS_IRONLAKE(dev) || IS_GEN6(dev)) { +@@ -1692,7 +2143,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + goto out_workqueue_free; + } + +- i915_get_mem_freq(dev); ++ if (IS_PINEVIEW(dev)) ++ i915_pineview_get_mem_freq(dev); ++ else if (IS_IRONLAKE(dev)) ++ i915_ironlake_get_mem_freq(dev); + + /* On the 945G/GM, the chipset reports the MSI capability on the + * integrated graphics even though the support isn't actually there +@@ -1710,7 +2164,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + + spin_lock_init(&dev_priv->user_irq_lock); + spin_lock_init(&dev_priv->error_lock); +- dev_priv->user_irq_refcount = 0; + dev_priv->trace_irq_seqno = 0; + + ret = drm_vblank_init(dev, I915_NUM_PIPE); +@@ -1723,6 +2176,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + /* Start out suspended */ + dev_priv->mm.suspended = 1; + ++ intel_detect_pch(dev); ++ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = i915_load_modeset_init(dev, prealloc_start, + prealloc_size, agp_size); +@@ -1737,6 +2192,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + + setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, + (unsigned long) dev); ++ ++ spin_lock(&mchdev_lock); ++ i915_mch_dev = dev_priv; ++ dev_priv->mchdev_lock = &mchdev_lock; ++ spin_unlock(&mchdev_lock); ++ + return 0; + + out_workqueue_free: +@@ -1758,6 +2219,10 @@ int i915_driver_unload(struct drm_device *dev) + + i915_destroy_error_state(dev); + ++ spin_lock(&mchdev_lock); ++ i915_mch_dev = NULL; ++ spin_unlock(&mchdev_lock); ++ + destroy_workqueue(dev_priv->wq); + del_timer_sync(&dev_priv->hangcheck_timer); + +@@ -1769,6 +2234,8 @@ int i915_driver_unload(struct drm_device *dev) + } + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ intel_modeset_cleanup(dev); ++ + /* + * free the memory space allocated for the child device + * config parsed from VBT +@@ -1792,8 +2259,6 @@ int i915_driver_unload(struct drm_device 
*dev) + intel_opregion_free(dev, 0); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- intel_modeset_cleanup(dev); +- + i915_gem_free_all_phys_object(dev); + + mutex_lock(&dev->struct_mutex); +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c +index cc03537..423dc90 100644 +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -60,95 +60,95 @@ extern int intel_agp_enabled; + .subdevice = PCI_ANY_ID, \ + .driver_data = (unsigned long) info } + +-const static struct intel_device_info intel_i830_info = { ++static const struct intel_device_info intel_i830_info = { + .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, + }; + +-const static struct intel_device_info intel_845g_info = { ++static const struct intel_device_info intel_845g_info = { + .is_i8xx = 1, + }; + +-const static struct intel_device_info intel_i85x_info = { ++static const struct intel_device_info intel_i85x_info = { + .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, + .cursor_needs_physical = 1, + }; + +-const static struct intel_device_info intel_i865g_info = { ++static const struct intel_device_info intel_i865g_info = { + .is_i8xx = 1, + }; + +-const static struct intel_device_info intel_i915g_info = { ++static const struct intel_device_info intel_i915g_info = { + .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, + }; +-const static struct intel_device_info intel_i915gm_info = { ++static const struct intel_device_info intel_i915gm_info = { + .is_i9xx = 1, .is_mobile = 1, + .cursor_needs_physical = 1, + }; +-const static struct intel_device_info intel_i945g_info = { ++static const struct intel_device_info intel_i945g_info = { + .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + }; +-const static struct intel_device_info intel_i945gm_info = { ++static const struct intel_device_info intel_i945gm_info = { + .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, + .has_hotplug = 1, .cursor_needs_physical = 1, + }; + +-const static 
struct intel_device_info intel_i965g_info = { ++static const struct intel_device_info intel_i965g_info = { + .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, + }; + +-const static struct intel_device_info intel_i965gm_info = { ++static const struct intel_device_info intel_i965gm_info = { + .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, + .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, + .has_hotplug = 1, + }; + +-const static struct intel_device_info intel_g33_info = { ++static const struct intel_device_info intel_g33_info = { + .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_hotplug = 1, + }; + +-const static struct intel_device_info intel_g45_info = { ++static const struct intel_device_info intel_g45_info = { + .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_pipe_cxsr = 1, + .has_hotplug = 1, + }; + +-const static struct intel_device_info intel_gm45_info = { ++static const struct intel_device_info intel_gm45_info = { + .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, + .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, + .has_pipe_cxsr = 1, + .has_hotplug = 1, + }; + +-const static struct intel_device_info intel_pineview_info = { ++static const struct intel_device_info intel_pineview_info = { + .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, + .need_gfx_hws = 1, + .has_hotplug = 1, + }; + +-const static struct intel_device_info intel_ironlake_d_info = { ++static const struct intel_device_info intel_ironlake_d_info = { + .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_pipe_cxsr = 1, + .has_hotplug = 1, + }; + +-const static struct intel_device_info intel_ironlake_m_info = { ++static const struct intel_device_info intel_ironlake_m_info = { + .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, + .need_gfx_hws = 1, .has_rc6 = 1, + .has_hotplug = 1, + }; + +-const static struct intel_device_info intel_sandybridge_d_info = { ++static const struct intel_device_info 
intel_sandybridge_d_info = { + .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_hotplug = 1, .is_gen6 = 1, + }; + +-const static struct intel_device_info intel_sandybridge_m_info = { ++static const struct intel_device_info intel_sandybridge_m_info = { + .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_hotplug = 1, .is_gen6 = 1, + }; + +-const static struct pci_device_id pciidlist[] = { ++static const struct pci_device_id pciidlist[] = { + INTEL_VGA_DEVICE(0x3577, &intel_i830_info), + INTEL_VGA_DEVICE(0x2562, &intel_845g_info), + INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), +@@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = { + MODULE_DEVICE_TABLE(pci, pciidlist); + #endif + ++#define INTEL_PCH_DEVICE_ID_MASK 0xff00 ++#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 ++ ++void intel_detect_pch (struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct pci_dev *pch; ++ ++ /* ++ * The reason to probe ISA bridge instead of Dev31:Fun0 is to ++ * make graphics device passthrough work easy for VMM, that only ++ * need to expose ISA bridge to let driver know the real hardware ++ * underneath. This is a requirement from virtualization team. 
++ */ ++ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); ++ if (pch) { ++ if (pch->vendor == PCI_VENDOR_ID_INTEL) { ++ int id; ++ id = pch->device & INTEL_PCH_DEVICE_ID_MASK; ++ ++ if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { ++ dev_priv->pch_type = PCH_CPT; ++ DRM_DEBUG_KMS("Found CougarPoint PCH\n"); ++ } ++ } ++ pci_dev_put(pch); ++ } ++} ++ + static int i915_drm_freeze(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +@@ -311,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags) + /* + * Clear request list + */ +- i915_gem_retire_requests(dev); ++ i915_gem_retire_requests(dev, &dev_priv->render_ring); + + if (need_display) + i915_save_display(dev); +@@ -341,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags) + } + } else { + DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); ++ mutex_unlock(&dev->struct_mutex); + return -ENODEV; + } + +@@ -359,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags) + * switched away). + */ + if (drm_core_check_feature(dev, DRIVER_MODESET) || +- !dev_priv->mm.suspended) { +- drm_i915_ring_buffer_t *ring = &dev_priv->ring; +- struct drm_gem_object *obj = ring->ring_obj; +- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); ++ !dev_priv->mm.suspended) { ++ struct intel_ring_buffer *ring = &dev_priv->render_ring; + dev_priv->mm.suspended = 0; +- +- /* Stop the ring if it's running. */ +- I915_WRITE(PRB0_CTL, 0); +- I915_WRITE(PRB0_TAIL, 0); +- I915_WRITE(PRB0_HEAD, 0); +- +- /* Initialize the ring. 
*/ +- I915_WRITE(PRB0_START, obj_priv->gtt_offset); +- I915_WRITE(PRB0_CTL, +- ((obj->size - 4096) & RING_NR_PAGES) | +- RING_NO_REPORT | +- RING_VALID); +- if (!drm_core_check_feature(dev, DRIVER_MODESET)) +- i915_kernel_lost_context(dev); +- else { +- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; +- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; +- ring->space = ring->head - (ring->tail + 8); +- if (ring->space < 0) +- ring->space += ring->Size; +- } +- ++ ring->init(dev, ring); + mutex_unlock(&dev->struct_mutex); + drm_irq_uninstall(dev); + drm_irq_install(dev); +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index 6e47900..2765831 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -32,6 +32,7 @@ + + #include "i915_reg.h" + #include "intel_bios.h" ++#include "intel_ringbuffer.h" + #include + + /* General customization: +@@ -55,6 +56,8 @@ enum plane { + + #define I915_NUM_PIPE 2 + ++#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) ++ + /* Interface history: + * + * 1.1: Original. 
+@@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object { + struct drm_gem_object *cur_obj; + }; + +-typedef struct _drm_i915_ring_buffer { +- unsigned long Size; +- u8 *virtual_start; +- int head; +- int tail; +- int space; +- drm_local_map_t map; +- struct drm_gem_object *ring_obj; +-} drm_i915_ring_buffer_t; +- + struct mem_block { + struct mem_block *next; + struct mem_block *prev; +@@ -128,6 +121,7 @@ struct drm_i915_master_private { + + struct drm_i915_fence_reg { + struct drm_gem_object *obj; ++ struct list_head lru_list; + }; + + struct sdvo_device_mapping { +@@ -135,6 +129,7 @@ struct sdvo_device_mapping { + u8 slave_addr; + u8 dvo_wiring; + u8 initialized; ++ u8 ddc_pin; + }; + + struct drm_i915_error_state { +@@ -175,7 +170,7 @@ struct drm_i915_error_state { + + struct drm_i915_display_funcs { + void (*dpms)(struct drm_crtc *crtc, int mode); +- bool (*fbc_enabled)(struct drm_crtc *crtc); ++ bool (*fbc_enabled)(struct drm_device *dev); + void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); + void (*disable_fbc)(struct drm_device *dev); + int (*get_display_clock_speed)(struct drm_device *dev); +@@ -222,6 +217,13 @@ enum no_fbc_reason { + FBC_NOT_TILED, /* buffer not tiled */ + }; + ++enum intel_pch { ++ PCH_IBX, /* Ibexpeak PCH */ ++ PCH_CPT, /* Cougarpoint PCH */ ++}; ++ ++struct intel_fbdev; ++ + typedef struct drm_i915_private { + struct drm_device *dev; + +@@ -232,17 +234,15 @@ typedef struct drm_i915_private { + void __iomem *regs; + + struct pci_dev *bridge_dev; +- drm_i915_ring_buffer_t ring; ++ struct intel_ring_buffer render_ring; ++ struct intel_ring_buffer bsd_ring; + + drm_dma_handle_t *status_page_dmah; +- void *hw_status_page; + void *seqno_page; + dma_addr_t dma_status_page; + uint32_t counter; +- unsigned int status_gfx_addr; + unsigned int seqno_gfx_addr; + drm_local_map_t hws_map; +- struct drm_gem_object *hws_obj; + struct drm_gem_object *seqno_obj; + struct drm_gem_object *pwrctx; + +@@ -258,8 +258,6 @@ typedef struct 
drm_i915_private { + atomic_t irq_received; + /** Protects user_irq_refcount and irq_mask_reg */ + spinlock_t user_irq_lock; +- /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ +- int user_irq_refcount; + u32 trace_irq_seqno; + /** Cached value of IMR to avoid reads in updating the bitfield */ + u32 irq_mask_reg; +@@ -280,6 +278,7 @@ typedef struct drm_i915_private { + struct mem_block *agp_heap; + unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; + int vblank_pipe; ++ int num_pipe; + + /* For hangcheck timer */ + #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ +@@ -325,7 +324,7 @@ typedef struct drm_i915_private { + int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ + int num_fence_regs; /* 8 on pre-965, 16 otherwise */ + +- unsigned int fsb_freq, mem_freq; ++ unsigned int fsb_freq, mem_freq, is_ddr3; + + spinlock_t error_lock; + struct drm_i915_error_state *first_error; +@@ -335,6 +334,9 @@ typedef struct drm_i915_private { + /* Display functions */ + struct drm_i915_display_funcs display; + ++ /* PCH chipset type */ ++ enum intel_pch pch_type; ++ + /* Register state */ + bool modeset_on_lid; + u8 saveLBB; +@@ -502,18 +504,7 @@ typedef struct drm_i915_private { + */ + struct list_head shrink_list; + +- /** +- * List of objects currently involved in rendering from the +- * ringbuffer. +- * +- * Includes buffers having the contents of their GPU caches +- * flushed, not necessarily primitives. last_rendering_seqno +- * represents when the rendering involved will be completed. +- * +- * A reference is held on the buffer while on this list. +- */ + spinlock_t active_list_lock; +- struct list_head active_list; + + /** + * List of objects which are not in the ringbuffer but which +@@ -551,12 +542,6 @@ typedef struct drm_i915_private { + struct list_head fence_list; + + /** +- * List of breadcrumbs associated with GPU requests currently +- * outstanding. 
+- */ +- struct list_head request_list; +- +- /** + * We leave the user IRQ off as much as possible, + * but this means that requests will finish and never + * be retired once the system goes idle. Set a timer to +@@ -632,16 +617,31 @@ typedef struct drm_i915_private { + u8 cur_delay; + u8 min_delay; + u8 max_delay; ++ u8 fmax; ++ u8 fstart; ++ ++ u64 last_count1; ++ unsigned long last_time1; ++ u64 last_count2; ++ struct timespec last_time2; ++ unsigned long gfx_power; ++ int c_m; ++ int r_t; ++ u8 corr; ++ spinlock_t *mchdev_lock; + + enum no_fbc_reason no_fbc_reason; + + struct drm_mm_node *compressed_fb; + struct drm_mm_node *compressed_llb; ++ ++ /* list of fbdev register on this device */ ++ struct intel_fbdev *fbdev; + } drm_i915_private_t; + + /** driver private structure attached to each drm_gem_object */ + struct drm_i915_gem_object { +- struct drm_gem_object *obj; ++ struct drm_gem_object base; + + /** Current space allocated to this object in the GTT, if any. */ + struct drm_mm_node *gtt_space; +@@ -651,27 +651,69 @@ struct drm_i915_gem_object { + /** This object's place on GPU write list */ + struct list_head gpu_write_list; + +- /** This object's place on the fenced object LRU */ +- struct list_head fence_list; +- + /** + * This is set if the object is on the active or flushing lists + * (has pending rendering), and is not set if it's on inactive (ready + * to be unbound). + */ +- int active; ++ unsigned int active : 1; + + /** + * This is set if the object has been written to since last bound + * to the GTT + */ +- int dirty; ++ unsigned int dirty : 1; ++ ++ /** ++ * Fence register bits (if any) for this object. Will be set ++ * as needed when mapped into the GTT. ++ * Protected by dev->struct_mutex. ++ * ++ * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) ++ */ ++ int fence_reg : 5; ++ ++ /** ++ * Used for checking the object doesn't appear more than once ++ * in an execbuffer object list. 
++ */ ++ unsigned int in_execbuffer : 1; ++ ++ /** ++ * Advice: are the backing pages purgeable? ++ */ ++ unsigned int madv : 2; ++ ++ /** ++ * Refcount for the pages array. With the current locking scheme, there ++ * are at most two concurrent users: Binding a bo to the gtt and ++ * pwrite/pread using physical addresses. So two bits for a maximum ++ * of two users are enough. ++ */ ++ unsigned int pages_refcount : 2; ++#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3 ++ ++ /** ++ * Current tiling mode for the object. ++ */ ++ unsigned int tiling_mode : 2; ++ ++ /** How many users have pinned this object in GTT space. The following ++ * users can each hold at most one reference: pwrite/pread, pin_ioctl ++ * (via user_pin_count), execbuffer (objects are not allowed multiple ++ * times for the same batchbuffer), and the framebuffer code. When ++ * switching/pageflipping, the framebuffer code has at most two buffers ++ * pinned per crtc. ++ * ++ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 ++ * bits with absolutely no headroom. So use 4 bits. */ ++ int pin_count : 4; ++#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf + + /** AGP memory structure for our GTT binding. */ + DRM_AGP_MEM *agp_mem; + + struct page **pages; +- int pages_refcount; + + /** + * Current offset of the object in GTT space. +@@ -680,26 +722,18 @@ struct drm_i915_gem_object { + */ + uint32_t gtt_offset; + ++ /* Which ring is refering to is this object */ ++ struct intel_ring_buffer *ring; ++ + /** + * Fake offset for use by mmap(2) + */ + uint64_t mmap_offset; + +- /** +- * Fence register bits (if any) for this object. Will be set +- * as needed when mapped into the GTT. +- * Protected by dev->struct_mutex. +- */ +- int fence_reg; +- +- /** How many users have pinned this object in GTT space */ +- int pin_count; +- + /** Breadcrumb of last rendering to the buffer. */ + uint32_t last_rendering_seqno; + +- /** Current tiling mode for the object. 
*/ +- uint32_t tiling_mode; ++ /** Current tiling stride for the object, if it's tiled. */ + uint32_t stride; + + /** Record of address bit 17 of each page at last unbind. */ +@@ -722,17 +756,6 @@ struct drm_i915_gem_object { + struct drm_i915_gem_phys_object *phys_obj; + + /** +- * Used for checking the object doesn't appear more than once +- * in an execbuffer object list. +- */ +- int in_execbuffer; +- +- /** +- * Advice: are the backing pages purgeable? +- */ +- int madv; +- +- /** + * Number of crtcs where this object is currently the fb, but + * will be page flipped away on the next vblank. When it + * reaches 0, dev_priv->pending_flip_queue will be woken up. +@@ -740,7 +763,7 @@ struct drm_i915_gem_object { + atomic_t pending_flip; + }; + +-#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private) ++#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) + + /** + * Request queue structure. +@@ -753,6 +776,9 @@ struct drm_i915_gem_object { + * an emission time with seqnos for tracking how far ahead of the GPU we are. + */ + struct drm_i915_gem_request { ++ /** On Which ring this request was generated */ ++ struct intel_ring_buffer *ring; ++ + /** GEM sequence number associated with this request. 
*/ + uint32_t seqno; + +@@ -809,6 +835,11 @@ extern int i915_emit_box(struct drm_device *dev, + struct drm_clip_rect *boxes, + int i, int DR1, int DR4); + extern int i965_reset(struct drm_device *dev, u8 flags); ++extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); ++extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); ++extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); ++extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); ++ + + /* i915_irq.c */ + void i915_hangcheck_elapsed(unsigned long data); +@@ -817,9 +848,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, + struct drm_file *file_priv); + extern int i915_irq_wait(struct drm_device *dev, void *data, + struct drm_file *file_priv); +-void i915_user_irq_get(struct drm_device *dev); + void i915_trace_irq_get(struct drm_device *dev, u32 seqno); +-void i915_user_irq_put(struct drm_device *dev); + extern void i915_enable_interrupt (struct drm_device *dev); + + extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); +@@ -837,6 +866,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); + extern int i915_vblank_swap(struct drm_device *dev, void *data, + struct drm_file *file_priv); + extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); ++extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); ++extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, ++ u32 mask); ++extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, ++ u32 mask); + + void + i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); +@@ -902,17 +936,21 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + void i915_gem_load(struct drm_device *dev); + int i915_gem_init_object(struct drm_gem_object *obj); ++struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, ++ size_t size); + void 
i915_gem_free_object(struct drm_gem_object *obj); + int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); + void i915_gem_object_unpin(struct drm_gem_object *obj); + int i915_gem_object_unbind(struct drm_gem_object *obj); + void i915_gem_release_mmap(struct drm_gem_object *obj); + void i915_gem_lastclose(struct drm_device *dev); +-uint32_t i915_get_gem_seqno(struct drm_device *dev); ++uint32_t i915_get_gem_seqno(struct drm_device *dev, ++ struct intel_ring_buffer *ring); + bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); + int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); + int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); +-void i915_gem_retire_requests(struct drm_device *dev); ++void i915_gem_retire_requests(struct drm_device *dev, ++ struct intel_ring_buffer *ring); + void i915_gem_retire_work_handler(struct work_struct *work); + void i915_gem_clflush_object(struct drm_gem_object *obj); + int i915_gem_object_set_domain(struct drm_gem_object *obj, +@@ -923,9 +961,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev); + int i915_gem_do_init(struct drm_device *dev, unsigned long start, + unsigned long end); + int i915_gem_idle(struct drm_device *dev); +-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, +- uint32_t flush_domains); +-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible); ++uint32_t i915_add_request(struct drm_device *dev, ++ struct drm_file *file_priv, ++ uint32_t flush_domains, ++ struct intel_ring_buffer *ring); ++int i915_do_wait_request(struct drm_device *dev, ++ uint32_t seqno, int interruptible, ++ struct intel_ring_buffer *ring); + int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); + int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, + int write); +@@ -998,6 +1040,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev); + extern int intel_modeset_vga_set_state(struct drm_device *dev, 
bool state); + extern void i8xx_disable_fbc(struct drm_device *dev); + extern void g4x_disable_fbc(struct drm_device *dev); ++extern void intel_disable_fbc(struct drm_device *dev); ++extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); ++extern bool intel_fbc_enabled(struct drm_device *dev); ++extern bool ironlake_set_drps(struct drm_device *dev, u8 val); ++extern void intel_detect_pch (struct drm_device *dev); ++extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); + + /** + * Lock test for when it's just for synchronization of ring access. +@@ -1006,7 +1054,8 @@ extern void g4x_disable_fbc(struct drm_device *dev); + * has access to the ring. + */ + #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ +- if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ ++ if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ ++ == NULL) \ + LOCK_TEST_WITH_RETURN(dev, file_priv); \ + } while (0) + +@@ -1019,35 +1068,31 @@ extern void g4x_disable_fbc(struct drm_device *dev); + #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) + #define I915_READ64(reg) readq(dev_priv->regs + (reg)) + #define POSTING_READ(reg) (void)I915_READ(reg) ++#define POSTING_READ16(reg) (void)I915_READ16(reg) + + #define I915_VERBOSE 0 + +-#define RING_LOCALS volatile unsigned int *ring_virt__; +- +-#define BEGIN_LP_RING(n) do { \ +- int bytes__ = 4*(n); \ +- if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ +- /* a wrap must occur between instructions so pad beforehand */ \ +- if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \ +- i915_wrap_ring(dev); \ +- if (unlikely (dev_priv->ring.space < bytes__)) \ +- i915_wait_ring(dev, bytes__, __func__); \ +- ring_virt__ = (unsigned int *) \ +- (dev_priv->ring.virtual_start + dev_priv->ring.tail); \ +- dev_priv->ring.tail += bytes__; \ +- dev_priv->ring.tail &= dev_priv->ring.Size - 1; \ +- dev_priv->ring.space -= bytes__; \ ++#define BEGIN_LP_RING(n) 
do { \ ++ drm_i915_private_t *dev_priv = dev->dev_private; \ ++ if (I915_VERBOSE) \ ++ DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ ++ intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \ + } while (0) + +-#define OUT_RING(n) do { \ +- if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ +- *ring_virt__++ = (n); \ ++ ++#define OUT_RING(x) do { \ ++ drm_i915_private_t *dev_priv = dev->dev_private; \ ++ if (I915_VERBOSE) \ ++ DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ ++ intel_ring_emit(dev, &dev_priv->render_ring, x); \ + } while (0) + + #define ADVANCE_LP_RING() do { \ ++ drm_i915_private_t *dev_priv = dev->dev_private; \ + if (I915_VERBOSE) \ +- DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \ +- I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \ ++ DRM_DEBUG("ADVANCE_LP_RING %x\n", \ ++ dev_priv->render_ring.tail); \ ++ intel_ring_advance(dev, &dev_priv->render_ring); \ + } while(0) + + /** +@@ -1065,14 +1110,12 @@ extern void g4x_disable_fbc(struct drm_device *dev); + * + * The area from dword 0x20 to 0x3ff is available for driver usage. 
+ */ +-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) ++#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ ++ (dev_priv->render_ring.status_page.page_addr))[reg]) + #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) + #define I915_GEM_HWS_INDEX 0x20 + #define I915_BREADCRUMB_INDEX 0x21 + +-extern int i915_wrap_ring(struct drm_device * dev); +-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); +- + #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) + + #define IS_I830(dev) ((dev)->pci_device == 0x3577) +@@ -1118,6 +1161,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + (dev)->pci_device == 0x2A42 || \ + (dev)->pci_device == 0x2E42) + ++#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) + #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) + + /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte +@@ -1130,7 +1174,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) + #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) + #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ +- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) ++ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \ ++ !IS_GEN6(dev)) + #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) + /* dsparb controlled by hw only */ + #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) +@@ -1144,6 +1189,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + IS_GEN6(dev)) + #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) + ++#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) ++#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) ++ + #define PRIMARY_RINGBUFFER_SIZE (128*1024) + + #endif +diff --git 
a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index ef3d91d..9ded3da 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -35,8 +35,6 @@ + #include + #include + +-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) +- + static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); + static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); + static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); +@@ -124,7 +122,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, + args->size = roundup(args->size, PAGE_SIZE); + + /* Allocate the new object */ +- obj = drm_gem_object_alloc(dev, args->size); ++ obj = i915_gem_alloc_object(dev, args->size); + if (obj == NULL) + return -ENOMEM; + +@@ -169,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) + obj_priv->tiling_mode != I915_TILING_NONE; + } + +-static inline int ++static inline void + slow_shmem_copy(struct page *dst_page, + int dst_offset, + struct page *src_page, +@@ -178,25 +176,16 @@ slow_shmem_copy(struct page *dst_page, + { + char *dst_vaddr, *src_vaddr; + +- dst_vaddr = kmap_atomic(dst_page, KM_USER0); +- if (dst_vaddr == NULL) +- return -ENOMEM; +- +- src_vaddr = kmap_atomic(src_page, KM_USER1); +- if (src_vaddr == NULL) { +- kunmap_atomic(dst_vaddr, KM_USER0); +- return -ENOMEM; +- } ++ dst_vaddr = kmap(dst_page); ++ src_vaddr = kmap(src_page); + + memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); + +- kunmap_atomic(src_vaddr, KM_USER1); +- kunmap_atomic(dst_vaddr, KM_USER0); +- +- return 0; ++ kunmap(src_page); ++ kunmap(dst_page); + } + +-static inline int ++static inline void + slow_shmem_bit17_copy(struct page *gpu_page, + int gpu_offset, + struct page *cpu_page, +@@ -216,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page, + cpu_page, cpu_offset, length); + } + +- gpu_vaddr = kmap_atomic(gpu_page, 
KM_USER0); +- if (gpu_vaddr == NULL) +- return -ENOMEM; +- +- cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); +- if (cpu_vaddr == NULL) { +- kunmap_atomic(gpu_vaddr, KM_USER0); +- return -ENOMEM; +- } ++ gpu_vaddr = kmap(gpu_page); ++ cpu_vaddr = kmap(cpu_page); + + /* Copy the data, XORing A6 with A17 (1). The user already knows he's + * XORing with the other bits (A9 for Y, A9 and A10 for X) +@@ -248,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page, + length -= this_length; + } + +- kunmap_atomic(cpu_vaddr, KM_USER1); +- kunmap_atomic(gpu_vaddr, KM_USER0); +- +- return 0; ++ kunmap(cpu_page); ++ kunmap(gpu_page); + } + + /** +@@ -427,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, + page_length = PAGE_SIZE - data_page_offset; + + if (do_bit17_swizzling) { +- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], +- shmem_page_offset, +- user_pages[data_page_index], +- data_page_offset, +- page_length, +- 1); +- } else { +- ret = slow_shmem_copy(user_pages[data_page_index], +- data_page_offset, +- obj_priv->pages[shmem_page_index], ++ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, +- page_length); ++ user_pages[data_page_index], ++ data_page_offset, ++ page_length, ++ 1); ++ } else { ++ slow_shmem_copy(user_pages[data_page_index], ++ data_page_offset, ++ obj_priv->pages[shmem_page_index], ++ shmem_page_offset, ++ page_length); + } +- if (ret) +- goto fail_put_pages; + + remain -= page_length; + data_ptr += page_length; +@@ -531,25 +509,24 @@ fast_user_write(struct io_mapping *mapping, + * page faults + */ + +-static inline int ++static inline void + slow_kernel_write(struct io_mapping *mapping, + loff_t gtt_base, int gtt_offset, + struct page *user_page, int user_offset, + int length) + { +- char *src_vaddr, *dst_vaddr; +- unsigned long unwritten; ++ char __iomem *dst_vaddr; ++ char *src_vaddr; + +- dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); +- src_vaddr = 
kmap_atomic(user_page, KM_USER1); +- unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, +- src_vaddr + user_offset, +- length); +- kunmap_atomic(src_vaddr, KM_USER1); +- io_mapping_unmap_atomic(dst_vaddr); +- if (unwritten) +- return -EFAULT; +- return 0; ++ dst_vaddr = io_mapping_map_wc(mapping, gtt_base); ++ src_vaddr = kmap(user_page); ++ ++ memcpy_toio(dst_vaddr + gtt_offset, ++ src_vaddr + user_offset, ++ length); ++ ++ kunmap(user_page); ++ io_mapping_unmap(dst_vaddr); + } + + static inline int +@@ -722,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + +- ret = slow_kernel_write(dev_priv->mm.gtt_mapping, +- gtt_page_base, gtt_page_offset, +- user_pages[data_page_index], +- data_page_offset, +- page_length); +- +- /* If we get a fault while copying data, then (presumably) our +- * source page isn't available. Return the error and we'll +- * retry in the slow path. 
+- */ +- if (ret) +- goto out_unpin_object; ++ slow_kernel_write(dev_priv->mm.gtt_mapping, ++ gtt_page_base, gtt_page_offset, ++ user_pages[data_page_index], ++ data_page_offset, ++ page_length); + + remain -= page_length; + offset += page_length; +@@ -902,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + page_length = PAGE_SIZE - data_page_offset; + + if (do_bit17_swizzling) { +- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], +- shmem_page_offset, +- user_pages[data_page_index], +- data_page_offset, +- page_length, +- 0); +- } else { +- ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], ++ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, +- page_length); ++ page_length, ++ 0); ++ } else { ++ slow_shmem_copy(obj_priv->pages[shmem_page_index], ++ shmem_page_offset, ++ user_pages[data_page_index], ++ data_page_offset, ++ page_length); + } +- if (ret) +- goto fail_put_pages; + + remain -= page_length; + data_ptr += page_length; +@@ -973,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + if (obj_priv->phys_obj) + ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); + else if (obj_priv->tiling_mode == I915_TILING_NONE && +- dev->gtt_total != 0) { ++ dev->gtt_total != 0 && ++ obj->write_domain != I915_GEM_DOMAIN_CPU) { + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) { + ret = i915_gem_gtt_pwrite_slow(dev, obj, args, +@@ -1051,7 +1020,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + * about to occur. 
+ */ + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { +- list_move_tail(&obj_priv->fence_list, ++ struct drm_i915_fence_reg *reg = ++ &dev_priv->fence_regs[obj_priv->fence_reg]; ++ list_move_tail(®->lru_list, + &dev_priv->mm.fence_list); + } + +@@ -1482,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) + } + + static void +-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) ++i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, ++ struct intel_ring_buffer *ring) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); ++ BUG_ON(ring == NULL); ++ obj_priv->ring = ring; + + /* Add a reference if we're newly entering the active list. */ + if (!obj_priv->active) { +@@ -1495,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) + } + /* Move from whatever list we were on to the tail of execution. */ + spin_lock(&dev_priv->mm.active_list_lock); +- list_move_tail(&obj_priv->list, +- &dev_priv->mm.active_list); ++ list_move_tail(&obj_priv->list, &ring->active_list); + spin_unlock(&dev_priv->mm.active_list_lock); + obj_priv->last_rendering_seqno = seqno; + } +@@ -1549,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) + BUG_ON(!list_empty(&obj_priv->gpu_write_list)); + + obj_priv->last_rendering_seqno = 0; ++ obj_priv->ring = NULL; + if (obj_priv->active) { + obj_priv->active = 0; + drm_gem_object_unreference(obj); +@@ -1558,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) + + static void + i915_gem_process_flushing_list(struct drm_device *dev, +- uint32_t flush_domains, uint32_t seqno) ++ uint32_t flush_domains, uint32_t seqno, ++ struct intel_ring_buffer *ring) + { + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv, *next; +@@ -1566,20 +1541,24 @@ i915_gem_process_flushing_list(struct 
drm_device *dev, + list_for_each_entry_safe(obj_priv, next, + &dev_priv->mm.gpu_write_list, + gpu_write_list) { +- struct drm_gem_object *obj = obj_priv->obj; ++ struct drm_gem_object *obj = &obj_priv->base; + + if ((obj->write_domain & flush_domains) == +- obj->write_domain) { ++ obj->write_domain && ++ obj_priv->ring->ring_flag == ring->ring_flag) { + uint32_t old_write_domain = obj->write_domain; + + obj->write_domain = 0; + list_del_init(&obj_priv->gpu_write_list); +- i915_gem_object_move_to_active(obj, seqno); ++ i915_gem_object_move_to_active(obj, seqno, ring); + + /* update the fence lru list */ +- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) +- list_move_tail(&obj_priv->fence_list, ++ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { ++ struct drm_i915_fence_reg *reg = ++ &dev_priv->fence_regs[obj_priv->fence_reg]; ++ list_move_tail(®->lru_list, + &dev_priv->mm.fence_list); ++ } + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, +@@ -1588,31 +1567,15 @@ i915_gem_process_flushing_list(struct drm_device *dev, + } + } + +-#define PIPE_CONTROL_FLUSH(addr) \ +- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ +- PIPE_CONTROL_DEPTH_STALL); \ +- OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ +- OUT_RING(0); \ +- OUT_RING(0); \ +- +-/** +- * Creates a new sequence number, emitting a write of it to the status page +- * plus an interrupt, which will trigger i915_user_interrupt_handler. +- * +- * Must be called with struct_lock held. +- * +- * Returned sequence numbers are nonzero on success. 
+- */ + uint32_t + i915_add_request(struct drm_device *dev, struct drm_file *file_priv, +- uint32_t flush_domains) ++ uint32_t flush_domains, struct intel_ring_buffer *ring) + { + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_file_private *i915_file_priv = NULL; + struct drm_i915_gem_request *request; + uint32_t seqno; + int was_empty; +- RING_LOCALS; + + if (file_priv != NULL) + i915_file_priv = file_priv->driver_priv; +@@ -1621,62 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, + if (request == NULL) + return 0; + +- /* Grab the seqno we're going to make this request be, and bump the +- * next (skipping 0 so it can be the reserved no-seqno value). +- */ +- seqno = dev_priv->mm.next_gem_seqno; +- dev_priv->mm.next_gem_seqno++; +- if (dev_priv->mm.next_gem_seqno == 0) +- dev_priv->mm.next_gem_seqno++; +- +- if (HAS_PIPE_CONTROL(dev)) { +- u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; +- +- /* +- * Workaround qword write incoherence by flushing the +- * PIPE_NOTIFY buffers out to memory before requesting +- * an interrupt. 
+- */ +- BEGIN_LP_RING(32); +- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | +- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); +- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); +- OUT_RING(seqno); +- OUT_RING(0); +- PIPE_CONTROL_FLUSH(scratch_addr); +- scratch_addr += 128; /* write to separate cachelines */ +- PIPE_CONTROL_FLUSH(scratch_addr); +- scratch_addr += 128; +- PIPE_CONTROL_FLUSH(scratch_addr); +- scratch_addr += 128; +- PIPE_CONTROL_FLUSH(scratch_addr); +- scratch_addr += 128; +- PIPE_CONTROL_FLUSH(scratch_addr); +- scratch_addr += 128; +- PIPE_CONTROL_FLUSH(scratch_addr); +- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | +- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | +- PIPE_CONTROL_NOTIFY); +- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); +- OUT_RING(seqno); +- OUT_RING(0); +- ADVANCE_LP_RING(); +- } else { +- BEGIN_LP_RING(4); +- OUT_RING(MI_STORE_DWORD_INDEX); +- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); +- OUT_RING(seqno); +- +- OUT_RING(MI_USER_INTERRUPT); +- ADVANCE_LP_RING(); +- } +- +- DRM_DEBUG_DRIVER("%d\n", seqno); ++ seqno = ring->add_request(dev, ring, file_priv, flush_domains); + + request->seqno = seqno; ++ request->ring = ring; + request->emitted_jiffies = jiffies; +- was_empty = list_empty(&dev_priv->mm.request_list); +- list_add_tail(&request->list, &dev_priv->mm.request_list); ++ was_empty = list_empty(&ring->request_list); ++ list_add_tail(&request->list, &ring->request_list); ++ + if (i915_file_priv) { + list_add_tail(&request->client_list, + &i915_file_priv->mm.request_list); +@@ -1688,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, + * domain we're flushing with our flush. 
+ */ + if (flush_domains != 0) +- i915_gem_process_flushing_list(dev, flush_domains, seqno); ++ i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); + + if (!dev_priv->mm.suspended) { + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); +@@ -1705,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, + * before signalling the CPU + */ + static uint32_t +-i915_retire_commands(struct drm_device *dev) ++i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) + { +- drm_i915_private_t *dev_priv = dev->dev_private; +- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; + uint32_t flush_domains = 0; +- RING_LOCALS; + + /* The sampler always gets flushed on i965 (sigh) */ + if (IS_I965G(dev)) + flush_domains |= I915_GEM_DOMAIN_SAMPLER; +- BEGIN_LP_RING(2); +- OUT_RING(cmd); +- OUT_RING(0); /* noop */ +- ADVANCE_LP_RING(); ++ ++ ring->flush(dev, ring, ++ I915_GEM_DOMAIN_COMMAND, flush_domains); + return flush_domains; + } + +@@ -1738,14 +1649,14 @@ i915_gem_retire_request(struct drm_device *dev, + * by the ringbuffer to the flushing/inactive lists as appropriate. 
+ */ + spin_lock(&dev_priv->mm.active_list_lock); +- while (!list_empty(&dev_priv->mm.active_list)) { ++ while (!list_empty(&request->ring->active_list)) { + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + +- obj_priv = list_first_entry(&dev_priv->mm.active_list, ++ obj_priv = list_first_entry(&request->ring->active_list, + struct drm_i915_gem_object, + list); +- obj = obj_priv->obj; ++ obj = &obj_priv->base; + + /* If the seqno being retired doesn't match the oldest in the + * list, then the oldest in the list must still be newer than +@@ -1789,35 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) + } + + uint32_t +-i915_get_gem_seqno(struct drm_device *dev) ++i915_get_gem_seqno(struct drm_device *dev, ++ struct intel_ring_buffer *ring) + { +- drm_i915_private_t *dev_priv = dev->dev_private; +- +- if (HAS_PIPE_CONTROL(dev)) +- return ((volatile u32 *)(dev_priv->seqno_page))[0]; +- else +- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); ++ return ring->get_gem_seqno(dev, ring); + } + + /** + * This function clears the request list as sequence numbers are passed. 
+ */ + void +-i915_gem_retire_requests(struct drm_device *dev) ++i915_gem_retire_requests(struct drm_device *dev, ++ struct intel_ring_buffer *ring) + { + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t seqno; + +- if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) ++ if (!ring->status_page.page_addr ++ || list_empty(&ring->request_list)) + return; + +- seqno = i915_get_gem_seqno(dev); ++ seqno = i915_get_gem_seqno(dev, ring); + +- while (!list_empty(&dev_priv->mm.request_list)) { ++ while (!list_empty(&ring->request_list)) { + struct drm_i915_gem_request *request; + uint32_t retiring_seqno; + +- request = list_first_entry(&dev_priv->mm.request_list, ++ request = list_first_entry(&ring->request_list, + struct drm_i915_gem_request, + list); + retiring_seqno = request->seqno; +@@ -1835,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev) + + if (unlikely (dev_priv->trace_irq_seqno && + i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { +- i915_user_irq_put(dev); ++ ++ ring->user_irq_put(dev, ring); + dev_priv->trace_irq_seqno = 0; + } + } +@@ -1851,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work) + dev = dev_priv->dev; + + mutex_lock(&dev->struct_mutex); +- i915_gem_retire_requests(dev); ++ i915_gem_retire_requests(dev, &dev_priv->render_ring); ++ ++ if (HAS_BSD(dev)) ++ i915_gem_retire_requests(dev, &dev_priv->bsd_ring); ++ + if (!dev_priv->mm.suspended && +- !list_empty(&dev_priv->mm.request_list)) ++ (!list_empty(&dev_priv->render_ring.request_list) || ++ (HAS_BSD(dev) && ++ !list_empty(&dev_priv->bsd_ring.request_list)))) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + mutex_unlock(&dev->struct_mutex); + } + + int +-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) ++i915_do_wait_request(struct drm_device *dev, uint32_t seqno, ++ int interruptible, struct intel_ring_buffer *ring) + { + drm_i915_private_t *dev_priv = dev->dev_private; + u32 ier; 
+@@ -1870,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) + if (atomic_read(&dev_priv->mm.wedged)) + return -EIO; + +- if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { ++ if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { + if (HAS_PCH_SPLIT(dev)) + ier = I915_READ(DEIER) | I915_READ(GTIER); + else +@@ -1884,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) + + trace_i915_gem_request_wait_begin(dev, seqno); + +- dev_priv->mm.waiting_gem_seqno = seqno; +- i915_user_irq_get(dev); ++ ring->waiting_gem_seqno = seqno; ++ ring->user_irq_get(dev, ring); + if (interruptible) +- ret = wait_event_interruptible(dev_priv->irq_queue, +- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || +- atomic_read(&dev_priv->mm.wedged)); ++ ret = wait_event_interruptible(ring->irq_queue, ++ i915_seqno_passed( ++ ring->get_gem_seqno(dev, ring), seqno) ++ || atomic_read(&dev_priv->mm.wedged)); + else +- wait_event(dev_priv->irq_queue, +- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || +- atomic_read(&dev_priv->mm.wedged)); ++ wait_event(ring->irq_queue, ++ i915_seqno_passed( ++ ring->get_gem_seqno(dev, ring), seqno) ++ || atomic_read(&dev_priv->mm.wedged)); + +- i915_user_irq_put(dev); +- dev_priv->mm.waiting_gem_seqno = 0; ++ ring->user_irq_put(dev, ring); ++ ring->waiting_gem_seqno = 0; + + trace_i915_gem_request_wait_end(dev, seqno); + } +@@ -1905,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) + + if (ret && ret != -ERESTARTSYS) + DRM_ERROR("%s returns %d (awaiting %d at %d)\n", +- __func__, ret, seqno, i915_get_gem_seqno(dev)); ++ __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); + + /* Directly dispatch request retiring. 
While we have the work queue + * to handle this, the waiter on a request often wants an associated +@@ -1913,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) + * a separate wait queue to handle that. + */ + if (ret == 0) +- i915_gem_retire_requests(dev); ++ i915_gem_retire_requests(dev, ring); + + return ret; + } +@@ -1923,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) + * request and object lists appropriately for that event. + */ + static int +-i915_wait_request(struct drm_device *dev, uint32_t seqno) ++i915_wait_request(struct drm_device *dev, uint32_t seqno, ++ struct intel_ring_buffer *ring) + { +- return i915_do_wait_request(dev, seqno, 1); ++ return i915_do_wait_request(dev, seqno, 1, ring); + } + + static void +@@ -1934,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev, + uint32_t flush_domains) + { + drm_i915_private_t *dev_priv = dev->dev_private; +- uint32_t cmd; +- RING_LOCALS; +- +-#if WATCH_EXEC +- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, +- invalidate_domains, flush_domains); +-#endif +- trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, +- invalidate_domains, flush_domains); +- + if (flush_domains & I915_GEM_DOMAIN_CPU) + drm_agp_chipset_flush(dev); ++ dev_priv->render_ring.flush(dev, &dev_priv->render_ring, ++ invalidate_domains, ++ flush_domains); ++ ++ if (HAS_BSD(dev)) ++ dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, ++ invalidate_domains, ++ flush_domains); ++} + +- if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { +- /* +- * read/write caches: +- * +- * I915_GEM_DOMAIN_RENDER is always invalidated, but is +- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is +- * also flushed at 2d versus 3d pipeline switches. +- * +- * read-only caches: +- * +- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if +- * MI_READ_FLUSH is set, and is always flushed on 965. 
+- * +- * I915_GEM_DOMAIN_COMMAND may not exist? +- * +- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is +- * invalidated when MI_EXE_FLUSH is set. +- * +- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is +- * invalidated with every MI_FLUSH. +- * +- * TLBs: +- * +- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND +- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and +- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER +- * are flushed at any MI_FLUSH. +- */ +- +- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; +- if ((invalidate_domains|flush_domains) & +- I915_GEM_DOMAIN_RENDER) +- cmd &= ~MI_NO_WRITE_FLUSH; +- if (!IS_I965G(dev)) { +- /* +- * On the 965, the sampler cache always gets flushed +- * and this bit is reserved. +- */ +- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) +- cmd |= MI_READ_FLUSH; +- } +- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) +- cmd |= MI_EXE_FLUSH; +- +-#if WATCH_EXEC +- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); +-#endif +- BEGIN_LP_RING(2); +- OUT_RING(cmd); +- OUT_RING(MI_NOOP); +- ADVANCE_LP_RING(); +- } ++static void ++i915_gem_flush_ring(struct drm_device *dev, ++ uint32_t invalidate_domains, ++ uint32_t flush_domains, ++ struct intel_ring_buffer *ring) ++{ ++ if (flush_domains & I915_GEM_DOMAIN_CPU) ++ drm_agp_chipset_flush(dev); ++ ring->flush(dev, ring, ++ invalidate_domains, ++ flush_domains); + } + + /** +@@ -2025,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) + DRM_INFO("%s: object %p wait for seqno %08x\n", + __func__, obj, obj_priv->last_rendering_seqno); + #endif +- ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); ++ ret = i915_wait_request(dev, ++ obj_priv->last_rendering_seqno, obj_priv->ring); + if (ret != 0) + return ret; + } +@@ -2119,7 +1998,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size) + + /* Try to find the smallest clean object */ + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { +- 
struct drm_gem_object *obj = obj_priv->obj; ++ struct drm_gem_object *obj = &obj_priv->base; + if (obj->size >= min_size) { + if ((!obj_priv->dirty || + i915_gem_object_is_purgeable(obj_priv)) && +@@ -2141,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + bool lists_empty; +- uint32_t seqno; ++ uint32_t seqno1, seqno2; ++ int ret; + + spin_lock(&dev_priv->mm.active_list_lock); +- lists_empty = list_empty(&dev_priv->mm.flushing_list) && +- list_empty(&dev_priv->mm.active_list); ++ lists_empty = (list_empty(&dev_priv->mm.flushing_list) && ++ list_empty(&dev_priv->render_ring.active_list) && ++ (!HAS_BSD(dev) || ++ list_empty(&dev_priv->bsd_ring.active_list))); + spin_unlock(&dev_priv->mm.active_list_lock); + + if (lists_empty) +@@ -2153,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev) + + /* Flush everything onto the inactive list. */ + i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); +- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); +- if (seqno == 0) ++ seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, ++ &dev_priv->render_ring); ++ if (seqno1 == 0) + return -ENOMEM; ++ ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); + +- return i915_wait_request(dev, seqno); ++ if (HAS_BSD(dev)) { ++ seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, ++ &dev_priv->bsd_ring); ++ if (seqno2 == 0) ++ return -ENOMEM; ++ ++ ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); ++ if (ret) ++ return ret; ++ } ++ ++ ++ return ret; + } + + static int +@@ -2170,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev) + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && +- list_empty(&dev_priv->mm.active_list)); ++ list_empty(&dev_priv->render_ring.active_list) && ++ (!HAS_BSD(dev) ++ || list_empty(&dev_priv->bsd_ring.active_list))); + 
spin_unlock(&dev_priv->mm.active_list_lock); + + if (lists_empty) +@@ -2190,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev) + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && +- list_empty(&dev_priv->mm.active_list)); ++ list_empty(&dev_priv->render_ring.active_list) && ++ (!HAS_BSD(dev) ++ || list_empty(&dev_priv->bsd_ring.active_list))); + spin_unlock(&dev_priv->mm.active_list_lock); + BUG_ON(!lists_empty); + +@@ -2204,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) + struct drm_gem_object *obj; + int ret; + ++ struct intel_ring_buffer *render_ring = &dev_priv->render_ring; ++ struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; + for (;;) { +- i915_gem_retire_requests(dev); ++ i915_gem_retire_requests(dev, render_ring); ++ ++ if (HAS_BSD(dev)) ++ i915_gem_retire_requests(dev, bsd_ring); + + /* If there's an inactive buffer available now, grab it + * and be done. +@@ -2229,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) + * things, wait for the next to finish and hopefully leave us + * a buffer to evict. 
+ */ +- if (!list_empty(&dev_priv->mm.request_list)) { ++ if (!list_empty(&render_ring->request_list)) { ++ struct drm_i915_gem_request *request; ++ ++ request = list_first_entry(&render_ring->request_list, ++ struct drm_i915_gem_request, ++ list); ++ ++ ret = i915_wait_request(dev, ++ request->seqno, request->ring); ++ if (ret) ++ return ret; ++ ++ continue; ++ } ++ ++ if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) { + struct drm_i915_gem_request *request; + +- request = list_first_entry(&dev_priv->mm.request_list, ++ request = list_first_entry(&bsd_ring->request_list, + struct drm_i915_gem_request, + list); + +- ret = i915_wait_request(dev, request->seqno); ++ ret = i915_wait_request(dev, ++ request->seqno, request->ring); + if (ret) + return ret; + +@@ -2253,7 +2174,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) + + /* Find an object that we can immediately reuse */ + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { +- obj = obj_priv->obj; ++ obj = &obj_priv->base; + if (obj->size >= min_size) + break; + +@@ -2263,10 +2184,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) + if (obj != NULL) { + uint32_t seqno; + +- i915_gem_flush(dev, ++ i915_gem_flush_ring(dev, + obj->write_domain, +- obj->write_domain); +- seqno = i915_add_request(dev, NULL, obj->write_domain); ++ obj->write_domain, ++ obj_priv->ring); ++ seqno = i915_add_request(dev, NULL, ++ obj->write_domain, ++ obj_priv->ring); + if (seqno == 0) + return -ENOMEM; + continue; +@@ -2294,6 +2218,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, + struct inode *inode; + struct page *page; + ++ BUG_ON(obj_priv->pages_refcount ++ == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); ++ + if (obj_priv->pages_refcount++ != 0) + return 0; + +@@ -2485,9 +2412,10 @@ static int i915_find_fence_reg(struct drm_device *dev) + + /* None available, try to steal one or wait for a user to finish */ + i = I915_FENCE_REG_NONE; +- list_for_each_entry(obj_priv, 
&dev_priv->mm.fence_list, +- fence_list) { +- obj = obj_priv->obj; ++ list_for_each_entry(reg, &dev_priv->mm.fence_list, ++ lru_list) { ++ obj = reg->obj; ++ obj_priv = to_intel_bo(obj); + + if (obj_priv->pin_count) + continue; +@@ -2536,7 +2464,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) + + /* Just update our place in the LRU if our fence is getting used. */ + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { +- list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); ++ reg = &dev_priv->fence_regs[obj_priv->fence_reg]; ++ list_move_tail(®->lru_list, &dev_priv->mm.fence_list); + return 0; + } + +@@ -2566,7 +2495,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) + + obj_priv->fence_reg = ret; + reg = &dev_priv->fence_regs[obj_priv->fence_reg]; +- list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); ++ list_add_tail(®->lru_list, &dev_priv->mm.fence_list); + + reg->obj = obj; + +@@ -2598,6 +2527,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); ++ struct drm_i915_fence_reg *reg = ++ &dev_priv->fence_regs[obj_priv->fence_reg]; + + if (IS_GEN6(dev)) { + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + +@@ -2616,9 +2547,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) + I915_WRITE(fence_reg, 0); + } + +- dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; ++ reg->obj = NULL; + obj_priv->fence_reg = I915_FENCE_REG_NONE; +- list_del_init(&obj_priv->fence_list); ++ list_del_init(®->lru_list); + } + + /** +@@ -2688,6 +2619,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) + return -EINVAL; + } + ++ /* If the object is bigger than the entire aperture, reject it early ++ * before evicting everything in a vain attempt to find space. 
++ */ ++ if (obj->size > dev->gtt_total) { ++ DRM_ERROR("Attempting to bind an object larger than the aperture\n"); ++ return -E2BIG; ++ } ++ + search_free: + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, + obj->size, alignment, 0); +@@ -2798,6 +2737,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + uint32_t old_write_domain; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) + return; +@@ -2805,7 +2745,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) + /* Queue the GPU write cache flushing we need. */ + old_write_domain = obj->write_domain; + i915_gem_flush(dev, 0, obj->write_domain); +- (void) i915_add_request(dev, NULL, obj->write_domain); ++ (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); + BUG_ON(obj->write_domain); + + trace_i915_gem_object_change_domain(obj, +@@ -2945,23 +2885,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) + DRM_INFO("%s: object %p wait for seqno %08x\n", + __func__, obj, obj_priv->last_rendering_seqno); + #endif +- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); ++ ret = i915_do_wait_request(dev, ++ obj_priv->last_rendering_seqno, ++ 0, ++ obj_priv->ring); + if (ret != 0) + return ret; + } + ++ i915_gem_object_flush_cpu_write_domain(obj); ++ + old_write_domain = obj->write_domain; + old_read_domains = obj->read_domains; + +- obj->read_domains &= I915_GEM_DOMAIN_GTT; +- +- i915_gem_object_flush_cpu_write_domain(obj); +- + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. 
+ */ + BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); +- obj->read_domains |= I915_GEM_DOMAIN_GTT; ++ obj->read_domains = I915_GEM_DOMAIN_GTT; + obj->write_domain = I915_GEM_DOMAIN_GTT; + obj_priv->dirty = 1; + +@@ -3345,9 +3286,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, + obj_priv->tiling_mode != I915_TILING_NONE; + + /* Check fence reg constraints and rebind if necessary */ +- if (need_fence && !i915_gem_object_fence_offset_ok(obj, +- obj_priv->tiling_mode)) +- i915_gem_object_unbind(obj); ++ if (need_fence && ++ !i915_gem_object_fence_offset_ok(obj, ++ obj_priv->tiling_mode)) { ++ ret = i915_gem_object_unbind(obj); ++ if (ret) ++ return ret; ++ } + + /* Choose the GTT offset for our buffer and put it there. */ + ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); +@@ -3361,9 +3306,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, + if (need_fence) { + ret = i915_gem_object_get_fence_reg(obj); + if (ret != 0) { +- if (ret != -EBUSY && ret != -ERESTARTSYS) +- DRM_ERROR("Failure to install fence: %d\n", +- ret); + i915_gem_object_unpin(obj); + return ret; + } +@@ -3536,62 +3478,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, + return 0; + } + +-/** Dispatch a batchbuffer to the ring +- */ +-static int +-i915_dispatch_gem_execbuffer(struct drm_device *dev, +- struct drm_i915_gem_execbuffer2 *exec, +- struct drm_clip_rect *cliprects, +- uint64_t exec_offset) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- int nbox = exec->num_cliprects; +- int i = 0, count; +- uint32_t exec_start, exec_len; +- RING_LOCALS; +- +- exec_start = (uint32_t) exec_offset + exec->batch_start_offset; +- exec_len = (uint32_t) exec->batch_len; +- +- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); +- +- count = nbox ? 
nbox : 1; +- +- for (i = 0; i < count; i++) { +- if (i < nbox) { +- int ret = i915_emit_box(dev, cliprects, i, +- exec->DR1, exec->DR4); +- if (ret) +- return ret; +- } +- +- if (IS_I830(dev) || IS_845G(dev)) { +- BEGIN_LP_RING(4); +- OUT_RING(MI_BATCH_BUFFER); +- OUT_RING(exec_start | MI_BATCH_NON_SECURE); +- OUT_RING(exec_start + exec_len - 4); +- OUT_RING(0); +- ADVANCE_LP_RING(); +- } else { +- BEGIN_LP_RING(2); +- if (IS_I965G(dev)) { +- OUT_RING(MI_BATCH_BUFFER_START | +- (2 << 6) | +- MI_BATCH_NON_SECURE_I965); +- OUT_RING(exec_start); +- } else { +- OUT_RING(MI_BATCH_BUFFER_START | +- (2 << 6)); +- OUT_RING(exec_start | MI_BATCH_NON_SECURE); +- } +- ADVANCE_LP_RING(); +- } +- } +- +- /* XXX breadcrumb */ +- return 0; +-} +- + /* Throttle our rendering by waiting until the ring has completed our requests + * emitted over 20 msec ago. + * +@@ -3620,7 +3506,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) + if (time_after_eq(request->emitted_jiffies, recent_enough)) + break; + +- ret = i915_wait_request(dev, request->seqno); ++ ret = i915_wait_request(dev, request->seqno, request->ring); + if (ret != 0) + break; + } +@@ -3777,10 +3663,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, + uint32_t seqno, flush_domains, reloc_index; + int pin_tries, flips; + ++ struct intel_ring_buffer *ring = NULL; ++ + #if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); + #endif ++ if (args->flags & I915_EXEC_BSD) { ++ if (!HAS_BSD(dev)) { ++ DRM_ERROR("execbuf with wrong flag\n"); ++ return -EINVAL; ++ } ++ ring = &dev_priv->bsd_ring; ++ } else { ++ ring = &dev_priv->render_ring; ++ } ++ + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); +@@ -3893,11 +3791,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, + if (ret != -ENOSPC || pin_tries >= 1) { + if (ret != -ERESTARTSYS) { + unsigned long 
long total_size = 0; +- for (i = 0; i < args->buffer_count; i++) ++ int num_fences = 0; ++ for (i = 0; i < args->buffer_count; i++) { ++ obj_priv = object_list[i]->driver_private; ++ + total_size += object_list[i]->size; +- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", ++ num_fences += ++ exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && ++ obj_priv->tiling_mode != I915_TILING_NONE; ++ } ++ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", + pinned+1, args->buffer_count, +- total_size, ret); ++ total_size, num_fences, ++ ret); + DRM_ERROR("%d objects [%d pinned], " + "%d object bytes [%d pinned], " + "%d/%d gtt bytes\n", +@@ -3967,9 +3873,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, + i915_gem_flush(dev, + dev->invalidate_domains, + dev->flush_domains); +- if (dev->flush_domains & I915_GEM_GPU_DOMAINS) ++ if (dev->flush_domains & I915_GEM_GPU_DOMAINS) { + (void)i915_add_request(dev, file_priv, +- dev->flush_domains); ++ dev->flush_domains, ++ &dev_priv->render_ring); ++ ++ if (HAS_BSD(dev)) ++ (void)i915_add_request(dev, file_priv, ++ dev->flush_domains, ++ &dev_priv->bsd_ring); ++ } + } + + for (i = 0; i < args->buffer_count; i++) { +@@ -4006,7 +3919,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, + #endif + + /* Exec the batchbuffer */ +- ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); ++ ret = ring->dispatch_gem_execbuffer(dev, ring, args, ++ cliprects, exec_offset); + if (ret) { + DRM_ERROR("dispatch failed %d\n", ret); + goto err; +@@ -4016,7 +3930,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, + * Ensure that the commands in the batch buffer are + * finished before the interrupt fires + */ +- flush_domains = i915_retire_commands(dev); ++ flush_domains = i915_retire_commands(dev, ring); + + i915_verify_inactive(dev, __FILE__, __LINE__); + +@@ -4027,12 +3941,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, + * *some* 
interrupts representing completion of buffers that we can + * wait on when trying to clear up gtt space). + */ +- seqno = i915_add_request(dev, file_priv, flush_domains); ++ seqno = i915_add_request(dev, file_priv, flush_domains, ring); + BUG_ON(seqno == 0); + for (i = 0; i < args->buffer_count; i++) { + struct drm_gem_object *obj = object_list[i]; ++ obj_priv = to_intel_bo(obj); + +- i915_gem_object_move_to_active(obj, seqno); ++ i915_gem_object_move_to_active(obj, seqno, ring); + #if WATCH_LRU + DRM_INFO("%s: move to exec list %p\n", __func__, obj); + #endif +@@ -4144,7 +4059,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, + exec2.DR4 = args->DR4; + exec2.num_cliprects = args->num_cliprects; + exec2.cliprects_ptr = args->cliprects_ptr; +- exec2.flags = 0; ++ exec2.flags = I915_EXEC_RENDER; + + ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); + if (!ret) { +@@ -4230,7 +4145,20 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int ret; + ++ BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); ++ + i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ if (obj_priv->gtt_space != NULL) { ++ if (alignment == 0) ++ alignment = i915_gem_get_gtt_alignment(obj); ++ if (obj_priv->gtt_offset & (alignment - 1)) { ++ ret = i915_gem_object_unbind(obj); ++ if (ret) ++ return ret; ++ } ++ } ++ + if (obj_priv->gtt_space == NULL) { + ret = i915_gem_object_bind_to_gtt(obj, alignment); + if (ret) +@@ -4383,6 +4311,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, + struct drm_i915_gem_busy *args = data; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; ++ drm_i915_private_t *dev_priv = dev->dev_private; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { +@@ -4397,7 +4326,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, + * actually unmasked, and our working set ends up being 
larger than + * required. + */ +- i915_gem_retire_requests(dev); ++ i915_gem_retire_requests(dev, &dev_priv->render_ring); ++ ++ if (HAS_BSD(dev)) ++ i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + + obj_priv = to_intel_bo(obj); + /* Don't count being on the flushing list against the object being +@@ -4471,34 +4403,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + return 0; + } + +-int i915_gem_init_object(struct drm_gem_object *obj) ++struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, ++ size_t size) + { +- struct drm_i915_gem_object *obj_priv; ++ struct drm_i915_gem_object *obj; + +- obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL); +- if (obj_priv == NULL) +- return -ENOMEM; ++ obj = kzalloc(sizeof(*obj), GFP_KERNEL); ++ if (obj == NULL) ++ return NULL; + +- /* +- * We've just allocated pages from the kernel, +- * so they've just been written by the CPU with +- * zeros. They'll need to be clflushed before we +- * use them with the GPU. +- */ +- obj->write_domain = I915_GEM_DOMAIN_CPU; +- obj->read_domains = I915_GEM_DOMAIN_CPU; ++ if (drm_gem_object_init(dev, &obj->base, size) != 0) { ++ kfree(obj); ++ return NULL; ++ } + +- obj_priv->agp_type = AGP_USER_MEMORY; ++ obj->base.write_domain = I915_GEM_DOMAIN_CPU; ++ obj->base.read_domains = I915_GEM_DOMAIN_CPU; + +- obj->driver_private = obj_priv; +- obj_priv->obj = obj; +- obj_priv->fence_reg = I915_FENCE_REG_NONE; +- INIT_LIST_HEAD(&obj_priv->list); +- INIT_LIST_HEAD(&obj_priv->gpu_write_list); +- INIT_LIST_HEAD(&obj_priv->fence_list); +- obj_priv->madv = I915_MADV_WILLNEED; ++ obj->agp_type = AGP_USER_MEMORY; ++ obj->base.driver_private = NULL; ++ obj->fence_reg = I915_FENCE_REG_NONE; ++ INIT_LIST_HEAD(&obj->list); ++ INIT_LIST_HEAD(&obj->gpu_write_list); ++ obj->madv = I915_MADV_WILLNEED; ++ ++ trace_i915_gem_object_create(&obj->base); + +- trace_i915_gem_object_create(obj); ++ return &obj->base; ++} ++ ++int i915_gem_init_object(struct drm_gem_object *obj) ++{ ++ 
BUG(); + + return 0; + } +@@ -4521,9 +4457,11 @@ void i915_gem_free_object(struct drm_gem_object *obj) + if (obj_priv->mmap_offset) + i915_gem_free_mmap_offset(obj); + ++ drm_gem_object_release(obj); ++ + kfree(obj_priv->page_cpu_valid); + kfree(obj_priv->bit_17); +- kfree(obj->driver_private); ++ kfree(obj_priv); + } + + /** Unbinds all inactive objects. */ +@@ -4536,9 +4474,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev) + struct drm_gem_object *obj; + int ret; + +- obj = list_first_entry(&dev_priv->mm.inactive_list, +- struct drm_i915_gem_object, +- list)->obj; ++ obj = &list_first_entry(&dev_priv->mm.inactive_list, ++ struct drm_i915_gem_object, ++ list)->base; + + ret = i915_gem_object_unbind(obj); + if (ret != 0) { +@@ -4558,7 +4496,10 @@ i915_gem_idle(struct drm_device *dev) + + mutex_lock(&dev->struct_mutex); + +- if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { ++ if (dev_priv->mm.suspended || ++ (dev_priv->render_ring.gem_object == NULL) || ++ (HAS_BSD(dev) && ++ dev_priv->bsd_ring.gem_object == NULL)) { + mutex_unlock(&dev->struct_mutex); + return 0; + } +@@ -4608,7 +4549,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) + struct drm_i915_gem_object *obj_priv; + int ret; + +- obj = drm_gem_object_alloc(dev, 4096); ++ obj = i915_gem_alloc_object(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate seqno page\n"); + ret = -ENOMEM; +@@ -4639,71 +4580,6 @@ err: + return ret; + } + +-static int +-i915_gem_init_hws(struct drm_device *dev) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_gem_object *obj; +- struct drm_i915_gem_object *obj_priv; +- int ret; +- +- /* If we need a physical address for the status page, it's already +- * initialized at driver load time. 
+- */ +- if (!I915_NEED_GFX_HWS(dev)) +- return 0; +- +- obj = drm_gem_object_alloc(dev, 4096); +- if (obj == NULL) { +- DRM_ERROR("Failed to allocate status page\n"); +- ret = -ENOMEM; +- goto err; +- } +- obj_priv = to_intel_bo(obj); +- obj_priv->agp_type = AGP_USER_CACHED_MEMORY; +- +- ret = i915_gem_object_pin(obj, 4096); +- if (ret != 0) { +- drm_gem_object_unreference(obj); +- goto err_unref; +- } +- +- dev_priv->status_gfx_addr = obj_priv->gtt_offset; +- +- dev_priv->hw_status_page = kmap(obj_priv->pages[0]); +- if (dev_priv->hw_status_page == NULL) { +- DRM_ERROR("Failed to map status page.\n"); +- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); +- ret = -EINVAL; +- goto err_unpin; +- } +- +- if (HAS_PIPE_CONTROL(dev)) { +- ret = i915_gem_init_pipe_control(dev); +- if (ret) +- goto err_unpin; +- } +- +- dev_priv->hws_obj = obj; +- memset(dev_priv->hw_status_page, 0, PAGE_SIZE); +- if (IS_GEN6(dev)) { +- I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); +- I915_READ(HWS_PGA_GEN6); /* posting read */ +- } else { +- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); +- I915_READ(HWS_PGA); /* posting read */ +- } +- DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); +- +- return 0; +- +-err_unpin: +- i915_gem_object_unpin(obj); +-err_unref: +- drm_gem_object_unreference(obj); +-err: +- return 0; +-} + + static void + i915_gem_cleanup_pipe_control(struct drm_device *dev) +@@ -4722,146 +4598,46 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev) + dev_priv->seqno_page = NULL; + } + +-static void +-i915_gem_cleanup_hws(struct drm_device *dev) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_gem_object *obj; +- struct drm_i915_gem_object *obj_priv; +- +- if (dev_priv->hws_obj == NULL) +- return; +- +- obj = dev_priv->hws_obj; +- obj_priv = to_intel_bo(obj); +- +- kunmap(obj_priv->pages[0]); +- i915_gem_object_unpin(obj); +- drm_gem_object_unreference(obj); +- dev_priv->hws_obj = NULL; +- +- 
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); +- dev_priv->hw_status_page = NULL; +- +- if (HAS_PIPE_CONTROL(dev)) +- i915_gem_cleanup_pipe_control(dev); +- +- /* Write high address into HWS_PGA when disabling. */ +- I915_WRITE(HWS_PGA, 0x1ffff000); +-} +- + int + i915_gem_init_ringbuffer(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_gem_object *obj; +- struct drm_i915_gem_object *obj_priv; +- drm_i915_ring_buffer_t *ring = &dev_priv->ring; + int ret; +- u32 head; + +- ret = i915_gem_init_hws(dev); +- if (ret != 0) +- return ret; ++ dev_priv->render_ring = render_ring; + +- obj = drm_gem_object_alloc(dev, 128 * 1024); +- if (obj == NULL) { +- DRM_ERROR("Failed to allocate ringbuffer\n"); +- i915_gem_cleanup_hws(dev); +- return -ENOMEM; +- } +- obj_priv = to_intel_bo(obj); +- +- ret = i915_gem_object_pin(obj, 4096); +- if (ret != 0) { +- drm_gem_object_unreference(obj); +- i915_gem_cleanup_hws(dev); +- return ret; ++ if (!I915_NEED_GFX_HWS(dev)) { ++ dev_priv->render_ring.status_page.page_addr ++ = dev_priv->status_page_dmah->vaddr; ++ memset(dev_priv->render_ring.status_page.page_addr, ++ 0, PAGE_SIZE); + } + +- /* Set up the kernel mapping for the ring. */ +- ring->Size = obj->size; +- +- ring->map.offset = dev->agp->base + obj_priv->gtt_offset; +- ring->map.size = obj->size; +- ring->map.type = 0; +- ring->map.flags = 0; +- ring->map.mtrr = 0; +- +- drm_core_ioremap_wc(&ring->map, dev); +- if (ring->map.handle == NULL) { +- DRM_ERROR("Failed to map ringbuffer.\n"); +- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); +- i915_gem_object_unpin(obj); +- drm_gem_object_unreference(obj); +- i915_gem_cleanup_hws(dev); +- return -EINVAL; +- } +- ring->ring_obj = obj; +- ring->virtual_start = ring->map.handle; +- +- /* Stop the ring if it's running. */ +- I915_WRITE(PRB0_CTL, 0); +- I915_WRITE(PRB0_TAIL, 0); +- I915_WRITE(PRB0_HEAD, 0); +- +- /* Initialize the ring. 
*/ +- I915_WRITE(PRB0_START, obj_priv->gtt_offset); +- head = I915_READ(PRB0_HEAD) & HEAD_ADDR; +- +- /* G45 ring initialization fails to reset head to zero */ +- if (head != 0) { +- DRM_ERROR("Ring head not reset to zero " +- "ctl %08x head %08x tail %08x start %08x\n", +- I915_READ(PRB0_CTL), +- I915_READ(PRB0_HEAD), +- I915_READ(PRB0_TAIL), +- I915_READ(PRB0_START)); +- I915_WRITE(PRB0_HEAD, 0); +- +- DRM_ERROR("Ring head forced to zero " +- "ctl %08x head %08x tail %08x start %08x\n", +- I915_READ(PRB0_CTL), +- I915_READ(PRB0_HEAD), +- I915_READ(PRB0_TAIL), +- I915_READ(PRB0_START)); +- } +- +- I915_WRITE(PRB0_CTL, +- ((obj->size - 4096) & RING_NR_PAGES) | +- RING_NO_REPORT | +- RING_VALID); +- +- head = I915_READ(PRB0_HEAD) & HEAD_ADDR; +- +- /* If the head is still not zero, the ring is dead */ +- if (head != 0) { +- DRM_ERROR("Ring initialization failed " +- "ctl %08x head %08x tail %08x start %08x\n", +- I915_READ(PRB0_CTL), +- I915_READ(PRB0_HEAD), +- I915_READ(PRB0_TAIL), +- I915_READ(PRB0_START)); +- return -EIO; ++ if (HAS_PIPE_CONTROL(dev)) { ++ ret = i915_gem_init_pipe_control(dev); ++ if (ret) ++ return ret; + } + +- /* Update our cache of the ring state */ +- if (!drm_core_check_feature(dev, DRIVER_MODESET)) +- i915_kernel_lost_context(dev); +- else { +- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; +- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; +- ring->space = ring->head - (ring->tail + 8); +- if (ring->space < 0) +- ring->space += ring->Size; +- } ++ ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); ++ if (ret) ++ goto cleanup_pipe_control; + +- if (IS_I9XX(dev) && !IS_GEN3(dev)) { +- I915_WRITE(MI_MODE, +- (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); ++ if (HAS_BSD(dev)) { ++ dev_priv->bsd_ring = bsd_ring; ++ ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); ++ if (ret) ++ goto cleanup_render_ring; + } + + return 0; ++ ++cleanup_render_ring: ++ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); ++cleanup_pipe_control: 
++ if (HAS_PIPE_CONTROL(dev)) ++ i915_gem_cleanup_pipe_control(dev); ++ return ret; + } + + void +@@ -4869,17 +4645,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + +- if (dev_priv->ring.ring_obj == NULL) +- return; +- +- drm_core_ioremapfree(&dev_priv->ring.map, dev); +- +- i915_gem_object_unpin(dev_priv->ring.ring_obj); +- drm_gem_object_unreference(dev_priv->ring.ring_obj); +- dev_priv->ring.ring_obj = NULL; +- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); +- +- i915_gem_cleanup_hws(dev); ++ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); ++ if (HAS_BSD(dev)) ++ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); ++ if (HAS_PIPE_CONTROL(dev)) ++ i915_gem_cleanup_pipe_control(dev); + } + + int +@@ -4907,12 +4677,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, + } + + spin_lock(&dev_priv->mm.active_list_lock); +- BUG_ON(!list_empty(&dev_priv->mm.active_list)); ++ BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); ++ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); + spin_unlock(&dev_priv->mm.active_list_lock); + + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); +- BUG_ON(!list_empty(&dev_priv->mm.request_list)); ++ BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); ++ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); + mutex_unlock(&dev->struct_mutex); + + drm_irq_install(dev); +@@ -4951,16 +4723,20 @@ i915_gem_load(struct drm_device *dev) + drm_i915_private_t *dev_priv = dev->dev_private; + + spin_lock_init(&dev_priv->mm.active_list_lock); +- INIT_LIST_HEAD(&dev_priv->mm.active_list); + INIT_LIST_HEAD(&dev_priv->mm.flushing_list); + INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); + INIT_LIST_HEAD(&dev_priv->mm.inactive_list); +- INIT_LIST_HEAD(&dev_priv->mm.request_list); + INIT_LIST_HEAD(&dev_priv->mm.fence_list); ++ 
INIT_LIST_HEAD(&dev_priv->render_ring.active_list); ++ INIT_LIST_HEAD(&dev_priv->render_ring.request_list); ++ if (HAS_BSD(dev)) { ++ INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); ++ INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); ++ } ++ for (i = 0; i < 16; i++) ++ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); + INIT_DELAYED_WORK(&dev_priv->mm.retire_work, + i915_gem_retire_work_handler); +- dev_priv->mm.next_gem_seqno = 1; +- + spin_lock(&shrink_list_lock); + list_add(&dev_priv->mm.shrink_list, &shrink_list); + spin_unlock(&shrink_list_lock); +@@ -5185,6 +4961,22 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) + } + + static int ++i915_gpu_is_active(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int lists_empty; ++ ++ spin_lock(&dev_priv->mm.active_list_lock); ++ lists_empty = list_empty(&dev_priv->mm.flushing_list) && ++ list_empty(&dev_priv->render_ring.active_list); ++ if (HAS_BSD(dev)) ++ lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); ++ spin_unlock(&dev_priv->mm.active_list_lock); ++ ++ return !lists_empty; ++} ++ ++static int + i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) + { + drm_i915_private_t *dev_priv, *next_dev; +@@ -5213,6 +5005,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) + + spin_lock(&shrink_list_lock); + ++rescan: + /* first scan for clean buffers */ + list_for_each_entry_safe(dev_priv, next_dev, + &shrink_list, mm.shrink_list) { +@@ -5222,14 +5015,16 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) + continue; + + spin_unlock(&shrink_list_lock); ++ i915_gem_retire_requests(dev, &dev_priv->render_ring); + +- i915_gem_retire_requests(dev); ++ if (HAS_BSD(dev)) ++ i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + + list_for_each_entry_safe(obj_priv, next_obj, + &dev_priv->mm.inactive_list, + list) { + if (i915_gem_object_is_purgeable(obj_priv)) { +- i915_gem_object_unbind(obj_priv->obj); ++ i915_gem_object_unbind(&obj_priv->base); + if 
(--nr_to_scan <= 0) + break; + } +@@ -5258,7 +5053,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) + &dev_priv->mm.inactive_list, + list) { + if (nr_to_scan > 0) { +- i915_gem_object_unbind(obj_priv->obj); ++ i915_gem_object_unbind(&obj_priv->base); + nr_to_scan--; + } else + cnt++; +@@ -5270,6 +5065,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) + would_deadlock = 0; + } + ++ if (nr_to_scan) { ++ int active = 0; ++ ++ /* ++ * We are desperate for pages, so as a last resort, wait ++ * for the GPU to finish and discard whatever we can. ++ * This has a dramatic impact to reduce the number of ++ * OOM-killer events whilst running the GPU aggressively. ++ */ ++ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { ++ struct drm_device *dev = dev_priv->dev; ++ ++ if (!mutex_trylock(&dev->struct_mutex)) ++ continue; ++ ++ spin_unlock(&shrink_list_lock); ++ ++ if (i915_gpu_is_active(dev)) { ++ i915_gpu_idle(dev); ++ active++; ++ } ++ ++ spin_lock(&shrink_list_lock); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ if (active) ++ goto rescan; ++ } ++ + spin_unlock(&shrink_list_lock); + + if (would_deadlock) +diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c +index 35507cf..80f380b 100644 +--- a/drivers/gpu/drm/i915/i915_gem_debug.c ++++ b/drivers/gpu/drm/i915/i915_gem_debug.c +@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line) + struct drm_i915_gem_object *obj_priv; + + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { +- obj = obj_priv->obj; ++ obj = &obj_priv->base; + if (obj_priv->pin_count || obj_priv->active || + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | + I915_GEM_DOMAIN_GTT))) +diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c +index 4bdccef..4b7c49d 100644 +--- a/drivers/gpu/drm/i915/i915_gem_tiling.c ++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c +@@ -283,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, 
void *data, + return -EINVAL; + } + ++ if (obj_priv->pin_count) { ++ drm_gem_object_unreference_unlocked(obj); ++ return -EBUSY; ++ } ++ + if (args->tiling_mode == I915_TILING_NONE) { + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + args->stride = 0; +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index df6a9cd..2479be0 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -53,7 +53,7 @@ + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + + /** Interrupts that we mask and unmask at runtime. */ +-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) ++#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT) + + #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ + PIPE_VBLANK_INTERRUPT_STATUS) +@@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) + } + } + +-static inline void ++void + ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) + { + if ((dev_priv->gt_irq_mask_reg & mask) != mask) { +@@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) + } + } + +-static inline void ++void + i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) + { + if ((dev_priv->irq_mask_reg & mask) != mask) { +@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev) + + if (HAS_PCH_SPLIT(dev)) + ironlake_enable_display_irq(dev_priv, DE_GSE); +- else ++ else { + i915_enable_pipestat(dev_priv, 1, + I915_LEGACY_BLC_EVENT_ENABLE); ++ if (IS_I965G(dev)) ++ i915_enable_pipestat(dev_priv, 0, ++ I915_LEGACY_BLC_EVENT_ENABLE); ++ } + } + + /** +@@ -256,28 +260,27 @@ static void i915_hotplug_work_func(struct work_struct *work) + hotplug_work); + struct drm_device *dev = dev_priv->dev; + struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + +- if (mode_config->num_connector) { +- list_for_each_entry(connector, 
&mode_config->connector_list, head) { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ if (mode_config->num_encoder) { ++ list_for_each_entry(encoder, &mode_config->encoder_list, head) { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + if (intel_encoder->hot_plug) + (*intel_encoder->hot_plug) (intel_encoder); + } + } + /* Just fire off a uevent and let userspace tell us what to do */ +- drm_sysfs_hotplug_event(dev); ++ drm_helper_hpd_irq_event(dev); + } + + static void i915_handle_rps_change(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + u32 busy_up, busy_down, max_avg, min_avg; +- u16 rgvswctl; + u8 new_delay = dev_priv->cur_delay; + +- I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); ++ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); + busy_up = I915_READ(RCPREVBSYTUPAVG); + busy_down = I915_READ(RCPREVBSYTDNAVG); + max_avg = I915_READ(RCBMAXAVG); +@@ -296,27 +299,8 @@ static void i915_handle_rps_change(struct drm_device *dev) + new_delay = dev_priv->min_delay; + } + +- DRM_DEBUG("rps change requested: %d -> %d\n", +- dev_priv->cur_delay, new_delay); +- +- rgvswctl = I915_READ(MEMSWCTL); +- if (rgvswctl & MEMCTL_CMD_STS) { +- DRM_ERROR("gpu busy, RCS change rejected\n"); +- return; /* still busy with another command */ +- } +- +- /* Program the new state */ +- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | +- (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; +- I915_WRITE(MEMSWCTL, rgvswctl); +- POSTING_READ(MEMSWCTL); +- +- rgvswctl |= MEMCTL_CMD_STS; +- I915_WRITE(MEMSWCTL, rgvswctl); +- +- dev_priv->cur_delay = new_delay; +- +- DRM_DEBUG("rps changed\n"); ++ if (ironlake_set_drps(dev, new_delay)) ++ dev_priv->cur_delay = new_delay; + + return; + } +@@ -327,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) + int ret = IRQ_NONE; + u32 de_iir, gt_iir, de_ier, pch_iir; + struct drm_i915_master_private *master_priv; ++ struct intel_ring_buffer 
*render_ring = &dev_priv->render_ring; + + /* disable master interrupt before clearing iir */ + de_ier = I915_READ(DEIER); +@@ -350,13 +335,16 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) + } + + if (gt_iir & GT_PIPE_NOTIFY) { +- u32 seqno = i915_get_gem_seqno(dev); +- dev_priv->mm.irq_gem_seqno = seqno; ++ u32 seqno = render_ring->get_gem_seqno(dev, render_ring); ++ render_ring->irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); +- DRM_WAKEUP(&dev_priv->irq_queue); ++ DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + } ++ if (gt_iir & GT_BSD_USER_INTERRUPT) ++ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); ++ + + if (de_iir & DE_GSE) + ironlake_opregion_gse_intr(dev); +@@ -384,7 +372,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) + } + + if (de_iir & DE_PCU_EVENT) { +- I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); ++ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); + i915_handle_rps_change(dev); + } + +@@ -532,17 +520,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev) + */ + bbaddr = 0; + head = I915_READ(PRB0_HEAD) & HEAD_ADDR; +- ring = (u32 *)(dev_priv->ring.virtual_start + head); ++ ring = (u32 *)(dev_priv->render_ring.virtual_start + head); + +- while (--ring >= (u32 *)dev_priv->ring.virtual_start) { ++ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { + bbaddr = i915_get_bbaddr(dev, ring); + if (bbaddr) + break; + } + + if (bbaddr == 0) { +- ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); +- while (--ring >= (u32 *)dev_priv->ring.virtual_start) { ++ ring = (u32 *)(dev_priv->render_ring.virtual_start ++ + dev_priv->render_ring.size); ++ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { + bbaddr = i915_get_bbaddr(dev, ring); + if (bbaddr) + break; +@@ -583,7 +572,7 @@ static void i915_capture_error_state(struct drm_device *dev) + return; + } + +- 
error->seqno = i915_get_gem_seqno(dev); ++ error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); + error->eir = I915_READ(EIR); + error->pgtbl_er = I915_READ(PGTBL_ER); + error->pipeastat = I915_READ(PIPEASTAT); +@@ -611,8 +600,10 @@ static void i915_capture_error_state(struct drm_device *dev) + batchbuffer[0] = NULL; + batchbuffer[1] = NULL; + count = 0; +- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { +- struct drm_gem_object *obj = obj_priv->obj; ++ list_for_each_entry(obj_priv, ++ &dev_priv->render_ring.active_list, list) { ++ ++ struct drm_gem_object *obj = &obj_priv->base; + + if (batchbuffer[0] == NULL && + bbaddr >= obj_priv->gtt_offset && +@@ -635,7 +626,8 @@ static void i915_capture_error_state(struct drm_device *dev) + error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); + + /* Record the ringbuffer */ +- error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); ++ error->ringbuffer = i915_error_object_create(dev, ++ dev_priv->render_ring.gem_object); + + /* Record buffers on the active list. */ + error->active_bo = NULL; +@@ -647,8 +639,9 @@ static void i915_capture_error_state(struct drm_device *dev) + + if (error->active_bo) { + int i = 0; +- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { +- struct drm_gem_object *obj = obj_priv->obj; ++ list_for_each_entry(obj_priv, ++ &dev_priv->render_ring.active_list, list) { ++ struct drm_gem_object *obj = &obj_priv->base; + + error->active_bo[i].size = obj->size; + error->active_bo[i].name = obj->name; +@@ -699,24 +692,13 @@ void i915_destroy_error_state(struct drm_device *dev) + i915_error_state_free(dev, error); + } + +-/** +- * i915_handle_error - handle an error interrupt +- * @dev: drm device +- * +- * Do some basic checking of regsiter state at error interrupt time and +- * dump it to the syslog. Also call i915_capture_error_state() to make +- * sure we get a record and make it available in debugfs. 
Fire a uevent +- * so userspace knows something bad happened (should trigger collection +- * of a ring dump etc.). +- */ +-static void i915_handle_error(struct drm_device *dev, bool wedged) ++static void i915_report_and_clear_eir(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; + u32 eir = I915_READ(EIR); +- u32 pipea_stats = I915_READ(PIPEASTAT); +- u32 pipeb_stats = I915_READ(PIPEBSTAT); + +- i915_capture_error_state(dev); ++ if (!eir) ++ return; + + printk(KERN_ERR "render error detected, EIR: 0x%08x\n", + eir); +@@ -762,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) + } + + if (eir & I915_ERROR_MEMORY_REFRESH) { ++ u32 pipea_stats = I915_READ(PIPEASTAT); ++ u32 pipeb_stats = I915_READ(PIPEBSTAT); ++ + printk(KERN_ERR "memory refresh error\n"); + printk(KERN_ERR "PIPEASTAT: 0x%08x\n", + pipea_stats); +@@ -818,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) + I915_WRITE(EMR, I915_READ(EMR) | eir); + I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + } ++} ++ ++/** ++ * i915_handle_error - handle an error interrupt ++ * @dev: drm device ++ * ++ * Do some basic checking of regsiter state at error interrupt time and ++ * dump it to the syslog. Also call i915_capture_error_state() to make ++ * sure we get a record and make it available in debugfs. Fire a uevent ++ * so userspace knows something bad happened (should trigger collection ++ * of a ring dump etc.). 
++ */ ++static void i915_handle_error(struct drm_device *dev, bool wedged) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ i915_capture_error_state(dev); ++ i915_report_and_clear_eir(dev); + + if (wedged) { + atomic_set(&dev_priv->mm.wedged, 1); +@@ -825,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) + /* + * Wakeup waiting processes so they don't hang + */ +- DRM_WAKEUP(&dev_priv->irq_queue); ++ DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + } + + queue_work(dev_priv->wq, &dev_priv->error_work); +@@ -844,6 +847,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + unsigned long irqflags; + int irq_received; + int ret = IRQ_NONE; ++ struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + + atomic_inc(&dev_priv->irq_received); + +@@ -924,14 +928,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + } + + if (iir & I915_USER_INTERRUPT) { +- u32 seqno = i915_get_gem_seqno(dev); +- dev_priv->mm.irq_gem_seqno = seqno; ++ u32 seqno = ++ render_ring->get_gem_seqno(dev, render_ring); ++ render_ring->irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); +- DRM_WAKEUP(&dev_priv->irq_queue); ++ DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + } + ++ if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) ++ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); ++ + if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) + intel_prepare_page_flip(dev, 0); + +@@ -950,7 +958,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + intel_finish_page_flip(dev, 1); + } + +- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || ++ if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || ++ (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || + (iir & I915_ASLE_INTERRUPT)) + opregion_asle_intr(dev); + +@@ -979,7 +988,6 @@ static int i915_emit_irq(struct drm_device * dev) + { + drm_i915_private_t *dev_priv = 
dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; +- RING_LOCALS; + + i915_kernel_lost_context(dev); + +@@ -1001,43 +1009,13 @@ static int i915_emit_irq(struct drm_device * dev) + return dev_priv->counter; + } + +-void i915_user_irq_get(struct drm_device *dev) +-{ +- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; +- unsigned long irqflags; +- +- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); +- if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { +- if (HAS_PCH_SPLIT(dev)) +- ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); +- else +- i915_enable_irq(dev_priv, I915_USER_INTERRUPT); +- } +- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); +-} +- +-void i915_user_irq_put(struct drm_device *dev) +-{ +- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; +- unsigned long irqflags; +- +- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); +- BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); +- if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { +- if (HAS_PCH_SPLIT(dev)) +- ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); +- else +- i915_disable_irq(dev_priv, I915_USER_INTERRUPT); +- } +- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); +-} +- + void i915_trace_irq_get(struct drm_device *dev, u32 seqno) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + + if (dev_priv->trace_irq_seqno == 0) +- i915_user_irq_get(dev); ++ render_ring->user_irq_get(dev, render_ring); + + dev_priv->trace_irq_seqno = seqno; + } +@@ -1047,6 +1025,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + int ret = 0; ++ struct intel_ring_buffer *render_ring = 
&dev_priv->render_ring; + + DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, + READ_BREADCRUMB(dev_priv)); +@@ -1060,10 +1039,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) + if (master_priv->sarea_priv) + master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + +- i915_user_irq_get(dev); +- DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, ++ render_ring->user_irq_get(dev, render_ring); ++ DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, + READ_BREADCRUMB(dev_priv) >= irq_nr); +- i915_user_irq_put(dev); ++ render_ring->user_irq_put(dev, render_ring); + + if (ret == -EBUSY) { + DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", +@@ -1082,7 +1061,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, + drm_i915_irq_emit_t *emit = data; + int result; + +- if (!dev_priv || !dev_priv->ring.virtual_start) { ++ if (!dev_priv || !dev_priv->render_ring.virtual_start) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } +@@ -1228,9 +1207,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data, + return -EINVAL; + } + +-struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { ++struct drm_i915_gem_request * ++i915_get_tail_request(struct drm_device *dev) ++{ + drm_i915_private_t *dev_priv = dev->dev_private; +- return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); ++ return list_entry(dev_priv->render_ring.request_list.prev, ++ struct drm_i915_gem_request, list); + } + + /** +@@ -1255,8 +1237,10 @@ void i915_hangcheck_elapsed(unsigned long data) + acthd = I915_READ(ACTHD_I965); + + /* If all work is done then ACTHD clearly hasn't advanced. 
*/ +- if (list_empty(&dev_priv->mm.request_list) || +- i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { ++ if (list_empty(&dev_priv->render_ring.request_list) || ++ i915_seqno_passed(i915_get_gem_seqno(dev, ++ &dev_priv->render_ring), ++ i915_get_tail_request(dev)->seqno)) { + dev_priv->hangcheck_count = 0; + return; + } +@@ -1309,7 +1293,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) + /* enable kind of interrupts always enabled */ + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; +- u32 render_mask = GT_PIPE_NOTIFY; ++ u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; + u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | + SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + +@@ -1323,7 +1307,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) + (void) I915_READ(DEIER); + + /* user interrupt should be enabled, but masked initial */ +- dev_priv->gt_irq_mask_reg = 0xffffffff; ++ dev_priv->gt_irq_mask_reg = ~render_mask; + dev_priv->gt_irq_enable_reg = render_mask; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); +@@ -1386,7 +1370,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) + u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; + u32 error_mask; + +- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); ++ DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); ++ ++ if (HAS_BSD(dev)) ++ DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); + + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 4cbc521..64b0a3a 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -334,6 +334,7 @@ + #define I915_DEBUG_INTERRUPT (1<<2) + #define I915_USER_INTERRUPT (1<<1) + #define I915_ASLE_INTERRUPT (1<<0) ++#define I915_BSD_USER_INTERRUPT (1<<25) + #define EIR 0x020b0 + #define EMR 0x020b4 + #define 
ESR 0x020b8 +@@ -368,6 +369,36 @@ + #define BB_ADDR 0x02140 /* 8 bytes */ + #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ + ++/* GEN6 interrupt control */ ++#define GEN6_RENDER_HWSTAM 0x2098 ++#define GEN6_RENDER_IMR 0x20a8 ++#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) ++#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) ++#define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6) ++#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) ++#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) ++#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) ++#define GEN6_RENDER_SYNC_STATUS (1 << 2) ++#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1) ++#define GEN6_RENDER_USER_INTERRUPT (1 << 0) ++ ++#define GEN6_BLITTER_HWSTAM 0x22098 ++#define GEN6_BLITTER_IMR 0x220a8 ++#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26) ++#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) ++#define GEN6_BLITTER_SYNC_STATUS (1 << 24) ++#define GEN6_BLITTER_USER_INTERRUPT (1 << 22) ++/* ++ * BSD (bit stream decoder instruction and interrupt control register defines ++ * (G4X and Ironlake only) ++ */ ++ ++#define BSD_RING_TAIL 0x04030 ++#define BSD_RING_HEAD 0x04034 ++#define BSD_RING_START 0x04038 ++#define BSD_RING_CTL 0x0403c ++#define BSD_RING_ACTHD 0x04074 ++#define BSD_HWS_PGA 0x04080 + + /* + * Framebuffer compression (915+ only) +@@ -805,6 +836,10 @@ + #define DCC_CHANNEL_XOR_DISABLE (1 << 10) + #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) + ++/** Pineview MCH register contains DDR3 setting */ ++#define CSHRDDR3CTL 0x101a8 ++#define CSHRDDR3CTL_DDR3 (1 << 2) ++ + /** 965 MCH register controlling DRAM channel configuration */ + #define C0DRB3 0x10206 + #define C1DRB3 0x10606 +@@ -826,6 +861,12 @@ + #define CLKCFG_MEM_800 (3 << 4) + #define CLKCFG_MEM_MASK (7 << 4) + ++#define TR1 0x11006 ++#define TSFS 0x11020 ++#define TSFS_SLOPE_MASK 0x0000ff00 ++#define TSFS_SLOPE_SHIFT 8 ++#define TSFS_INTR_MASK 0x000000ff ++ + #define CRSTANDVID 0x11100 + #define PXVFREQ_BASE 0x11110 /* 
P[0-15]VIDFREQ (0x1114c) (Ironlake) */ + #define PXVFREQ_PX_MASK 0x7f000000 +@@ -964,6 +1005,41 @@ + #define MEMSTAT_SRC_CTL_STDBY 3 + #define RCPREVBSYTUPAVG 0x113b8 + #define RCPREVBSYTDNAVG 0x113bc ++#define SDEW 0x1124c ++#define CSIEW0 0x11250 ++#define CSIEW1 0x11254 ++#define CSIEW2 0x11258 ++#define PEW 0x1125c ++#define DEW 0x11270 ++#define MCHAFE 0x112c0 ++#define CSIEC 0x112e0 ++#define DMIEC 0x112e4 ++#define DDREC 0x112e8 ++#define PEG0EC 0x112ec ++#define PEG1EC 0x112f0 ++#define GFXEC 0x112f4 ++#define RPPREVBSYTUPAVG 0x113b8 ++#define RPPREVBSYTDNAVG 0x113bc ++#define ECR 0x11600 ++#define ECR_GPFE (1<<31) ++#define ECR_IMONE (1<<30) ++#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ ++#define OGW0 0x11608 ++#define OGW1 0x1160c ++#define EG0 0x11610 ++#define EG1 0x11614 ++#define EG2 0x11618 ++#define EG3 0x1161c ++#define EG4 0x11620 ++#define EG5 0x11624 ++#define EG6 0x11628 ++#define EG7 0x1162c ++#define PXW 0x11664 ++#define PXWL 0x11680 ++#define LCFUSE02 0x116c0 ++#define LCFUSE_HIV_MASK 0x000000ff ++#define CSIPLL0 0x12c10 ++#define DDRMPLL1 0X12c20 + #define PEG_BAND_GAP_DATA 0x14d68 + + /* +@@ -1055,7 +1131,6 @@ + #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) + #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) + #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ +-#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f + + #define PORT_HOTPLUG_STAT 0x61114 + #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) +@@ -1764,6 +1839,14 @@ + #define DP_LINK_TRAIN_MASK (3 << 28) + #define DP_LINK_TRAIN_SHIFT 28 + ++/* CPT Link training mode */ ++#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) ++#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) ++#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) ++#define DP_LINK_TRAIN_OFF_CPT (3 << 8) ++#define DP_LINK_TRAIN_MASK_CPT (7 << 8) ++#define DP_LINK_TRAIN_SHIFT_CPT 8 ++ + /* Signal voltages. 
These are mostly controlled by the other end */ + #define DP_VOLTAGE_0_4 (0 << 25) + #define DP_VOLTAGE_0_6 (1 << 25) +@@ -1924,7 +2007,10 @@ + /* Display & cursor control */ + + /* dithering flag on Ironlake */ +-#define PIPE_ENABLE_DITHER (1 << 4) ++#define PIPE_ENABLE_DITHER (1 << 4) ++#define PIPE_DITHER_TYPE_MASK (3 << 2) ++#define PIPE_DITHER_TYPE_SPATIAL (0 << 2) ++#define PIPE_DITHER_TYPE_ST01 (1 << 2) + /* Pipe A */ + #define PIPEADSL 0x70000 + #define PIPEACONF 0x70008 +@@ -1988,15 +2074,24 @@ + + #define DSPFW1 0x70034 + #define DSPFW_SR_SHIFT 23 ++#define DSPFW_SR_MASK (0x1ff<<23) + #define DSPFW_CURSORB_SHIFT 16 ++#define DSPFW_CURSORB_MASK (0x3f<<16) + #define DSPFW_PLANEB_SHIFT 8 ++#define DSPFW_PLANEB_MASK (0x7f<<8) ++#define DSPFW_PLANEA_MASK (0x7f) + #define DSPFW2 0x70038 + #define DSPFW_CURSORA_MASK 0x00003f00 + #define DSPFW_CURSORA_SHIFT 8 ++#define DSPFW_PLANEC_MASK (0x7f) + #define DSPFW3 0x7003c + #define DSPFW_HPLL_SR_EN (1<<31) + #define DSPFW_CURSOR_SR_SHIFT 24 + #define PINEVIEW_SELF_REFRESH_EN (1<<30) ++#define DSPFW_CURSOR_SR_MASK (0x3f<<24) ++#define DSPFW_HPLL_CURSOR_SHIFT 16 ++#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) ++#define DSPFW_HPLL_SR_MASK (0x1ff) + + /* FIFO watermark sizes etc */ + #define G4X_FIFO_LINE_SIZE 64 +@@ -2023,6 +2118,43 @@ + #define PINEVIEW_CURSOR_DFT_WM 0 + #define PINEVIEW_CURSOR_GUARD_WM 5 + ++ ++/* define the Watermark register on Ironlake */ ++#define WM0_PIPEA_ILK 0x45100 ++#define WM0_PIPE_PLANE_MASK (0x7f<<16) ++#define WM0_PIPE_PLANE_SHIFT 16 ++#define WM0_PIPE_SPRITE_MASK (0x3f<<8) ++#define WM0_PIPE_SPRITE_SHIFT 8 ++#define WM0_PIPE_CURSOR_MASK (0x1f) ++ ++#define WM0_PIPEB_ILK 0x45104 ++#define WM1_LP_ILK 0x45108 ++#define WM1_LP_SR_EN (1<<31) ++#define WM1_LP_LATENCY_SHIFT 24 ++#define WM1_LP_LATENCY_MASK (0x7f<<24) ++#define WM1_LP_SR_MASK (0x1ff<<8) ++#define WM1_LP_SR_SHIFT 8 ++#define WM1_LP_CURSOR_MASK (0x3f) ++ ++/* Memory latency timer register */ ++#define MLTR_ILK 0x11222 ++/* the unit of 
memory self-refresh latency time is 0.5us */ ++#define ILK_SRLT_MASK 0x3f ++ ++/* define the fifo size on Ironlake */ ++#define ILK_DISPLAY_FIFO 128 ++#define ILK_DISPLAY_MAXWM 64 ++#define ILK_DISPLAY_DFTWM 8 ++ ++#define ILK_DISPLAY_SR_FIFO 512 ++#define ILK_DISPLAY_MAX_SRWM 0x1ff ++#define ILK_DISPLAY_DFT_SRWM 0x3f ++#define ILK_CURSOR_SR_FIFO 64 ++#define ILK_CURSOR_MAX_SRWM 0x3f ++#define ILK_CURSOR_DFT_SRWM 8 ++ ++#define ILK_FIFO_LINE_SIZE 64 ++ + /* + * The two pipe frame counter registers are not synchronized, so + * reading a stable value is somewhat tricky. The following code +@@ -2298,14 +2430,23 @@ + #define GT_PIPE_NOTIFY (1 << 4) + #define GT_SYNC_STATUS (1 << 2) + #define GT_USER_INTERRUPT (1 << 0) ++#define GT_BSD_USER_INTERRUPT (1 << 5) ++ + + #define GTISR 0x44010 + #define GTIMR 0x44014 + #define GTIIR 0x44018 + #define GTIER 0x4401c + ++#define ILK_DISPLAY_CHICKEN2 0x42004 ++#define ILK_DPARB_GATE (1<<22) ++#define ILK_VSDPFD_FULL (1<<21) ++#define ILK_DSPCLK_GATE 0x42020 ++#define ILK_DPARB_CLK_GATE (1<<5) ++ + #define DISP_ARB_CTL 0x45000 + #define DISP_TILE_SURFACE_SWIZZLING (1<<13) ++#define DISP_FBC_WM_DIS (1<<15) + + /* PCH */ + +@@ -2316,6 +2457,11 @@ + #define SDE_PORTB_HOTPLUG (1 << 8) + #define SDE_SDVOB_HOTPLUG (1 << 6) + #define SDE_HOTPLUG_MASK (0xf << 8) ++/* CPT */ ++#define SDE_CRT_HOTPLUG_CPT (1 << 19) ++#define SDE_PORTD_HOTPLUG_CPT (1 << 23) ++#define SDE_PORTC_HOTPLUG_CPT (1 << 22) ++#define SDE_PORTB_HOTPLUG_CPT (1 << 21) + + #define SDEISR 0xc4000 + #define SDEIMR 0xc4004 +@@ -2407,6 +2553,17 @@ + #define PCH_SSC4_PARMS 0xc6210 + #define PCH_SSC4_AUX_PARMS 0xc6214 + ++#define PCH_DPLL_SEL 0xc7000 ++#define TRANSA_DPLL_ENABLE (1<<3) ++#define TRANSA_DPLLB_SEL (1<<0) ++#define TRANSA_DPLLA_SEL 0 ++#define TRANSB_DPLL_ENABLE (1<<7) ++#define TRANSB_DPLLB_SEL (1<<4) ++#define TRANSB_DPLLA_SEL (0) ++#define TRANSC_DPLL_ENABLE (1<<11) ++#define TRANSC_DPLLB_SEL (1<<8) ++#define TRANSC_DPLLA_SEL (0) ++ + /* transcoder */ + + 
#define TRANS_HTOTAL_A 0xe0000 +@@ -2493,6 +2650,19 @@ + #define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) + #define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) + #define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) ++/* ILK always use 400mV 0dB for voltage swing and pre-emphasis level. ++ SNB has different settings. */ ++/* SNB A-stepping */ ++#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) ++#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) ++#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) ++#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) ++/* SNB B-stepping */ ++#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) ++#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) ++#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) ++#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) ++#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22) + #define FDI_DP_PORT_WIDTH_X1 (0<<19) + #define FDI_DP_PORT_WIDTH_X2 (1<<19) + #define FDI_DP_PORT_WIDTH_X3 (2<<19) +@@ -2525,6 +2695,13 @@ + #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) + #define FDI_SEL_RAWCLK (0<<4) + #define FDI_SEL_PCDCLK (1<<4) ++/* CPT */ ++#define FDI_AUTO_TRAINING (1<<10) ++#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) ++#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8) ++#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) ++#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) ++#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) + + #define FDI_RXA_MISC 0xf0010 + #define FDI_RXB_MISC 0xf1010 +@@ -2590,12 +2767,18 @@ + #define SDVO_ENCODING (0) + #define TMDS_ENCODING (2 << 10) + #define NULL_PACKET_VSYNC_ENABLE (1 << 9) ++/* CPT */ ++#define HDMI_MODE_SELECT (1 << 9) ++#define DVI_MODE_SELECT (0) + #define SDVOB_BORDER_ENABLE (1 << 7) + #define AUDIO_ENABLE (1 << 6) + #define VSYNC_ACTIVE_HIGH (1 << 4) + #define HSYNC_ACTIVE_HIGH (1 << 3) + #define PORT_DETECTED (1 << 2) + ++/* PCH SDVOB multiplex with HDMIB */ ++#define PCH_SDVOB HDMIB ++ + #define HDMIC 0xe1150 + #define HDMID 0xe1160 + +@@ -2653,4 +2836,42 @@ + #define PCH_DPD_AUX_CH_DATA4 0xe4320 + #define 
PCH_DPD_AUX_CH_DATA5 0xe4324 + ++/* CPT */ ++#define PORT_TRANS_A_SEL_CPT 0 ++#define PORT_TRANS_B_SEL_CPT (1<<29) ++#define PORT_TRANS_C_SEL_CPT (2<<29) ++#define PORT_TRANS_SEL_MASK (3<<29) ++ ++#define TRANS_DP_CTL_A 0xe0300 ++#define TRANS_DP_CTL_B 0xe1300 ++#define TRANS_DP_CTL_C 0xe2300 ++#define TRANS_DP_OUTPUT_ENABLE (1<<31) ++#define TRANS_DP_PORT_SEL_B (0<<29) ++#define TRANS_DP_PORT_SEL_C (1<<29) ++#define TRANS_DP_PORT_SEL_D (2<<29) ++#define TRANS_DP_PORT_SEL_MASK (3<<29) ++#define TRANS_DP_AUDIO_ONLY (1<<26) ++#define TRANS_DP_ENH_FRAMING (1<<18) ++#define TRANS_DP_8BPC (0<<9) ++#define TRANS_DP_10BPC (1<<9) ++#define TRANS_DP_6BPC (2<<9) ++#define TRANS_DP_12BPC (3<<9) ++#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) ++#define TRANS_DP_VSYNC_ACTIVE_LOW 0 ++#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) ++#define TRANS_DP_HSYNC_ACTIVE_LOW 0 ++ ++/* SNB eDP training params */ ++/* SNB A-stepping */ ++#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) ++#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) ++#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) ++#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) ++/* SNB B-stepping */ ++#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) ++#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) ++#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) ++#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) ++#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) ++ + #endif /* _I915_REG_H_ */ +diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c +index ac0d1a7..60a5800 100644 +--- a/drivers/gpu/drm/i915/i915_suspend.c ++++ b/drivers/gpu/drm/i915/i915_suspend.c +@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev) + } + /* FIXME: save TV & SDVO state */ + +- /* FBC state */ +- if (IS_GM45(dev)) { +- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); +- } else { +- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); +- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); +- 
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); +- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); ++ /* Only save FBC state on the platform that supports FBC */ ++ if (I915_HAS_FBC(dev)) { ++ if (IS_GM45(dev)) { ++ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); ++ } else { ++ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); ++ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); ++ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); ++ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); ++ } + } + + /* VGA state */ +@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev) + } + /* FIXME: restore TV & SDVO state */ + +- /* FBC info */ +- if (IS_GM45(dev)) { +- g4x_disable_fbc(dev); +- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); +- } else { +- i8xx_disable_fbc(dev); +- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); +- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); +- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); +- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); ++ /* only restore FBC info on the platform that supports FBC*/ ++ if (I915_HAS_FBC(dev)) { ++ if (IS_GM45(dev)) { ++ g4x_disable_fbc(dev); ++ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); ++ } else { ++ i8xx_disable_fbc(dev); ++ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); ++ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); ++ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); ++ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); ++ } + } +- + /* VGA state */ + if (IS_IRONLAKE(dev)) + I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); +diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h +index 01840d9..fab2176 100644 +--- a/drivers/gpu/drm/i915/i915_trace.h ++++ b/drivers/gpu/drm/i915/i915_trace.h +@@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind, + __entry->obj, __entry->gtt_offset) + ); + +-TRACE_EVENT(i915_gem_object_clflush, +- +- TP_PROTO(struct drm_gem_object *obj), +- +- 
TP_ARGS(obj), +- +- TP_STRUCT__entry( +- __field(struct drm_gem_object *, obj) +- ), +- +- TP_fast_assign( +- __entry->obj = obj; +- ), +- +- TP_printk("obj=%p", __entry->obj) +-); +- + TRACE_EVENT(i915_gem_object_change_domain, + + TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), +@@ -115,7 +98,7 @@ TRACE_EVENT(i915_gem_object_get_fence, + __entry->obj, __entry->fence, __entry->tiling_mode) + ); + +-TRACE_EVENT(i915_gem_object_unbind, ++DECLARE_EVENT_CLASS(i915_gem_object, + + TP_PROTO(struct drm_gem_object *obj), + +@@ -132,21 +115,25 @@ TRACE_EVENT(i915_gem_object_unbind, + TP_printk("obj=%p", __entry->obj) + ); + +-TRACE_EVENT(i915_gem_object_destroy, ++DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, + + TP_PROTO(struct drm_gem_object *obj), + +- TP_ARGS(obj), ++ TP_ARGS(obj) ++); + +- TP_STRUCT__entry( +- __field(struct drm_gem_object *, obj) +- ), ++DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, + +- TP_fast_assign( +- __entry->obj = obj; +- ), ++ TP_PROTO(struct drm_gem_object *obj), + +- TP_printk("obj=%p", __entry->obj) ++ TP_ARGS(obj) ++); ++ ++DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, ++ ++ TP_PROTO(struct drm_gem_object *obj), ++ ++ TP_ARGS(obj) + ); + + /* batch tracing */ +@@ -197,8 +184,7 @@ TRACE_EVENT(i915_gem_request_flush, + __entry->flush_domains, __entry->invalidate_domains) + ); + +- +-TRACE_EVENT(i915_gem_request_complete, ++DECLARE_EVENT_CLASS(i915_gem_request, + + TP_PROTO(struct drm_device *dev, u32 seqno), + +@@ -217,64 +203,35 @@ TRACE_EVENT(i915_gem_request_complete, + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) + ); + +-TRACE_EVENT(i915_gem_request_retire, ++DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, + + TP_PROTO(struct drm_device *dev, u32 seqno), + +- TP_ARGS(dev, seqno), +- +- TP_STRUCT__entry( +- __field(u32, dev) +- __field(u32, seqno) +- ), +- +- TP_fast_assign( +- __entry->dev = dev->primary->index; +- __entry->seqno = seqno; +- 
), +- +- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ++ TP_ARGS(dev, seqno) + ); + +-TRACE_EVENT(i915_gem_request_wait_begin, ++DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, + + TP_PROTO(struct drm_device *dev, u32 seqno), + +- TP_ARGS(dev, seqno), +- +- TP_STRUCT__entry( +- __field(u32, dev) +- __field(u32, seqno) +- ), +- +- TP_fast_assign( +- __entry->dev = dev->primary->index; +- __entry->seqno = seqno; +- ), +- +- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ++ TP_ARGS(dev, seqno) + ); + +-TRACE_EVENT(i915_gem_request_wait_end, ++DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, + + TP_PROTO(struct drm_device *dev, u32 seqno), + +- TP_ARGS(dev, seqno), ++ TP_ARGS(dev, seqno) ++); + +- TP_STRUCT__entry( +- __field(u32, dev) +- __field(u32, seqno) +- ), ++DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, + +- TP_fast_assign( +- __entry->dev = dev->primary->index; +- __entry->seqno = seqno; +- ), ++ TP_PROTO(struct drm_device *dev, u32 seqno), + +- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ++ TP_ARGS(dev, seqno) + ); + +-TRACE_EVENT(i915_ring_wait_begin, ++DECLARE_EVENT_CLASS(i915_ring, + + TP_PROTO(struct drm_device *dev), + +@@ -291,26 +248,23 @@ TRACE_EVENT(i915_ring_wait_begin, + TP_printk("dev=%u", __entry->dev) + ); + +-TRACE_EVENT(i915_ring_wait_end, ++DEFINE_EVENT(i915_ring, i915_ring_wait_begin, + + TP_PROTO(struct drm_device *dev), + +- TP_ARGS(dev), ++ TP_ARGS(dev) ++); + +- TP_STRUCT__entry( +- __field(u32, dev) +- ), ++DEFINE_EVENT(i915_ring, i915_ring_wait_end, + +- TP_fast_assign( +- __entry->dev = dev->primary->index; +- ), ++ TP_PROTO(struct drm_device *dev), + +- TP_printk("dev=%u", __entry->dev) ++ TP_ARGS(dev) + ); + + #endif /* _I915_TRACE_H_ */ + + /* This part must be outside protection */ + #undef TRACE_INCLUDE_PATH +-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 ++#define TRACE_INCLUDE_PATH . 
+ #include +diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c +index f9ba452..96f75d7 100644 +--- a/drivers/gpu/drm/i915/intel_bios.c ++++ b/drivers/gpu/drm/i915/intel_bios.c +@@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, + panel_fixed_mode->clock = dvo_timing->clock * 10; + panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; + ++ if (dvo_timing->hsync_positive) ++ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; ++ else ++ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; ++ ++ if (dvo_timing->vsync_positive) ++ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; ++ else ++ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; ++ + /* Some VBTs have bogus h/vtotal values */ + if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) + panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; +@@ -366,6 +376,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, + p_mapping->dvo_port = p_child->dvo_port; + p_mapping->slave_addr = p_child->slave_addr; + p_mapping->dvo_wiring = p_child->dvo_wiring; ++ p_mapping->ddc_pin = p_child->ddc_pin; + p_mapping->initialized = 1; + } else { + DRM_DEBUG_KMS("Maybe one SDVO port is shared by " +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c +index 759c2ef..22ff384 100644 +--- a/drivers/gpu/drm/i915/intel_crt.c ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, + adpa |= ADPA_VSYNC_ACTIVE_HIGH; + + if (intel_crtc->pipe == 0) { +- adpa |= ADPA_PIPE_A_SELECT; ++ if (HAS_PCH_CPT(dev)) ++ adpa |= PORT_TRANS_A_SEL_CPT; ++ else ++ adpa |= ADPA_PIPE_A_SELECT; + if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(BCLRPAT_A, 0); + } else { +- adpa |= ADPA_PIPE_B_SELECT; ++ if (HAS_PCH_CPT(dev)) ++ adpa |= PORT_TRANS_B_SEL_CPT; ++ else ++ adpa |= ADPA_PIPE_B_SELECT; + if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(BCLRPAT_B, 0); + } +@@ -152,15 +158,21 @@ static bool 
intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- u32 adpa; ++ u32 adpa, temp; + bool ret; + +- adpa = I915_READ(PCH_ADPA); ++ temp = adpa = I915_READ(PCH_ADPA); + +- adpa &= ~ADPA_CRT_HOTPLUG_MASK; +- /* disable HPD first */ +- I915_WRITE(PCH_ADPA, adpa); +- (void)I915_READ(PCH_ADPA); ++ if (HAS_PCH_CPT(dev)) { ++ /* Disable DAC before force detect */ ++ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE); ++ (void)I915_READ(PCH_ADPA); ++ } else { ++ adpa &= ~ADPA_CRT_HOTPLUG_MASK; ++ /* disable HPD first */ ++ I915_WRITE(PCH_ADPA, adpa); ++ (void)I915_READ(PCH_ADPA); ++ } + + adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | + ADPA_CRT_HOTPLUG_WARMUP_10MS | +@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) + while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) + ; + ++ if (HAS_PCH_CPT(dev)) { ++ I915_WRITE(PCH_ADPA, temp); ++ (void)I915_READ(PCH_ADPA); ++ } ++ + /* Check the status to see if both blue and green are on now */ + adpa = I915_READ(PCH_ADPA); + adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; +@@ -200,7 +217,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- u32 hotplug_en; ++ u32 hotplug_en, orig, stat; ++ bool ret = false; + int i, tries = 0; + + if (HAS_PCH_SPLIT(dev)) +@@ -215,8 +233,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) + tries = 2; + else + tries = 1; +- hotplug_en = I915_READ(PORT_HOTPLUG_EN); +- hotplug_en &= CRT_FORCE_HOTPLUG_MASK; ++ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); ++ hotplug_en &= CRT_HOTPLUG_MASK; + hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; + + if (IS_G4X(dev)) +@@ -238,16 +256,22 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) + } while (time_after(timeout, jiffies)); + } + +- 
if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) != +- CRT_HOTPLUG_MONITOR_NONE) +- return true; ++ stat = I915_READ(PORT_HOTPLUG_STAT); ++ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) ++ ret = true; ++ ++ /* clear the interrupt we just generated, if any */ ++ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); ++ ++ /* and put the bits back */ ++ I915_WRITE(PORT_HOTPLUG_EN, orig); + +- return false; ++ return ret; + } + +-static bool intel_crt_detect_ddc(struct drm_connector *connector) ++static bool intel_crt_detect_ddc(struct drm_encoder *encoder) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + /* CRT should always be at 0, but check anyway */ + if (intel_encoder->type != INTEL_OUTPUT_ANALOG) +@@ -387,8 +411,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder + static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct drm_encoder *encoder = &intel_encoder->enc; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct drm_crtc *crtc; + int dpms_mode; + enum drm_connector_status status; +@@ -400,18 +424,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto + return connector_status_disconnected; + } + +- if (intel_crt_detect_ddc(connector)) ++ if (intel_crt_detect_ddc(encoder)) + return connector_status_connected; + + /* for pre-945g platforms use load detect */ + if (encoder->crtc && encoder->crtc->enabled) { + status = intel_crt_load_detect(encoder->crtc, intel_encoder); + } else { +- crtc = intel_get_load_detect_pipe(intel_encoder, ++ crtc = intel_get_load_detect_pipe(intel_encoder, connector, + NULL, &dpms_mode); + if 
(crtc) { + status = intel_crt_load_detect(crtc, intel_encoder); +- intel_release_load_detect_pipe(intel_encoder, dpms_mode); ++ intel_release_load_detect_pipe(intel_encoder, ++ connector, dpms_mode); + } else + status = connector_status_unknown; + } +@@ -421,9 +446,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto + + static void intel_crt_destroy(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- +- intel_i2c_destroy(intel_encoder->ddc_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +@@ -432,29 +454,27 @@ static void intel_crt_destroy(struct drm_connector *connector) + static int intel_crt_get_modes(struct drm_connector *connector) + { + int ret; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct i2c_adapter *ddcbus; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct i2c_adapter *ddc_bus; + struct drm_device *dev = connector->dev; + + +- ret = intel_ddc_get_modes(intel_encoder); ++ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + if (ret || !IS_G4X(dev)) + goto end; + +- ddcbus = intel_encoder->ddc_bus; + /* Try to probe digital port for output in DVI-I -> VGA mode. 
*/ +- intel_encoder->ddc_bus = +- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); ++ ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); + +- if (!intel_encoder->ddc_bus) { +- intel_encoder->ddc_bus = ddcbus; ++ if (!ddc_bus) { + dev_printk(KERN_ERR, &connector->dev->pdev->dev, + "DDC bus registration failed for CRTDDC_D.\n"); + goto end; + } + /* Try to get modes by GPIOD port */ +- ret = intel_ddc_get_modes(intel_encoder); +- intel_i2c_destroy(ddcbus); ++ ret = intel_ddc_get_modes(connector, ddc_bus); ++ intel_i2c_destroy(ddc_bus); + + end: + return ret; +@@ -491,12 +511,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { + static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { + .mode_valid = intel_crt_mode_valid, + .get_modes = intel_crt_get_modes, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_crt_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ ++ intel_i2c_destroy(intel_encoder->ddc_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_crt_enc_funcs = { +@@ -507,6 +531,7 @@ void intel_crt_init(struct drm_device *dev) + { + struct drm_connector *connector; + struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 i2c_reg; + +@@ -514,14 +539,20 @@ void intel_crt_init(struct drm_device *dev) + if (!intel_encoder) + return; + +- connector = &intel_encoder->base; +- drm_connector_init(dev, &intel_encoder->base, ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ ++ connector = &intel_connector->base; ++ drm_connector_init(dev, &intel_connector->base, + &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); + + 
drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, + DRM_MODE_ENCODER_DAC); + +- drm_mode_connector_attach_encoder(&intel_encoder->base, ++ drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->enc); + + /* Set up the DDC bus. */ +@@ -545,7 +576,7 @@ void intel_crt_init(struct drm_device *dev) + (1 << INTEL_ANALOG_CLONE_BIT) | + (1 << INTEL_SDVO_LVDS_CLONE_BIT); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); +- connector->interlace_allowed = 0; ++ connector->interlace_allowed = 1; + connector->doublescan_allowed = 0; + + drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); +@@ -553,5 +584,10 @@ void intel_crt_init(struct drm_device *dev) + + drm_sysfs_connector_add(connector); + ++ if (I915_HAS_HOTPLUG(dev)) ++ connector->polled = DRM_CONNECTOR_POLL_HPD; ++ else ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ + dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; + } +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index c7502b6..d753257 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) + { + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *l_entry; ++ struct drm_encoder *l_entry; + +- list_for_each_entry(l_entry, &mode_config->connector_list, head) { +- if (l_entry->encoder && +- l_entry->encoder->crtc == crtc) { +- struct intel_encoder *intel_encoder = to_intel_encoder(l_entry); ++ list_for_each_entry(l_entry, &mode_config->encoder_list, head) { ++ if (l_entry && l_entry->crtc == crtc) { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); + if (intel_encoder->type == type) + return true; + } +@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) + return false; + } + +-static struct drm_connector * 
+-intel_pipe_get_connector (struct drm_crtc *crtc) +-{ +- struct drm_device *dev = crtc->dev; +- struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *l_entry, *ret = NULL; +- +- list_for_each_entry(l_entry, &mode_config->connector_list, head) { +- if (l_entry->encoder && +- l_entry->encoder->crtc == crtc) { +- ret = l_entry; +- break; +- } +- } +- return ret; +-} +- + #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) + /** + * Returns whether the given set of divisors are valid for a given refclk with +@@ -1047,28 +1029,36 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) + void i8xx_disable_fbc(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; ++ unsigned long timeout = jiffies + msecs_to_jiffies(1); + u32 fbc_ctl; + + if (!I915_HAS_FBC(dev)) + return; + ++ if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) ++ return; /* Already off, just return */ ++ + /* Disable compression */ + fbc_ctl = I915_READ(FBC_CONTROL); + fbc_ctl &= ~FBC_CTL_EN; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + /* Wait for compressing bit to clear */ +- while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) +- ; /* nothing */ ++ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) { ++ if (time_after(jiffies, timeout)) { ++ DRM_DEBUG_DRIVER("FBC idle timed out\n"); ++ break; ++ } ++ ; /* do nothing */ ++ } + + intel_wait_for_vblank(dev); + + DRM_DEBUG_KMS("disabled FBC\n"); + } + +-static bool i8xx_fbc_enabled(struct drm_crtc *crtc) ++static bool i8xx_fbc_enabled(struct drm_device *dev) + { +- struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(FBC_CONTROL) & FBC_CTL_EN; +@@ -1125,14 +1115,43 @@ void g4x_disable_fbc(struct drm_device *dev) + DRM_DEBUG_KMS("disabled FBC\n"); + } + +-static bool g4x_fbc_enabled(struct drm_crtc *crtc) ++static bool g4x_fbc_enabled(struct drm_device *dev) + { +- struct drm_device *dev = crtc->dev; + 
struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; + } + ++bool intel_fbc_enabled(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ if (!dev_priv->display.fbc_enabled) ++ return false; ++ ++ return dev_priv->display.fbc_enabled(dev); ++} ++ ++void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ++{ ++ struct drm_i915_private *dev_priv = crtc->dev->dev_private; ++ ++ if (!dev_priv->display.enable_fbc) ++ return; ++ ++ dev_priv->display.enable_fbc(crtc, interval); ++} ++ ++void intel_disable_fbc(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ if (!dev_priv->display.disable_fbc) ++ return; ++ ++ dev_priv->display.disable_fbc(dev); ++} ++ + /** + * intel_update_fbc - enable/disable FBC as needed + * @crtc: CRTC to point the compressor at +@@ -1167,9 +1186,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, + if (!i915_powersave) + return; + +- if (!dev_priv->display.fbc_enabled || +- !dev_priv->display.enable_fbc || +- !dev_priv->display.disable_fbc) ++ if (!I915_HAS_FBC(dev)) + return; + + if (!crtc->fb) +@@ -1216,28 +1233,26 @@ static void intel_update_fbc(struct drm_crtc *crtc, + goto out_disable; + } + +- if (dev_priv->display.fbc_enabled(crtc)) { ++ if (intel_fbc_enabled(dev)) { + /* We can re-enable it in this case, but need to update pitch */ +- if (fb->pitch > dev_priv->cfb_pitch) +- dev_priv->display.disable_fbc(dev); +- if (obj_priv->fence_reg != dev_priv->cfb_fence) +- dev_priv->display.disable_fbc(dev); +- if (plane != dev_priv->cfb_plane) +- dev_priv->display.disable_fbc(dev); ++ if ((fb->pitch > dev_priv->cfb_pitch) || ++ (obj_priv->fence_reg != dev_priv->cfb_fence) || ++ (plane != dev_priv->cfb_plane)) ++ intel_disable_fbc(dev); + } + +- if (!dev_priv->display.fbc_enabled(crtc)) { +- /* Now try to turn it back on if possible */ +- dev_priv->display.enable_fbc(crtc, 500); +- } ++ /* Now try to turn 
it back on if possible */ ++ if (!intel_fbc_enabled(dev)) ++ intel_enable_fbc(crtc, 500); + + return; + + out_disable: +- DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); + /* Multiple disables should be harmless */ +- if (dev_priv->display.fbc_enabled(crtc)) +- dev_priv->display.disable_fbc(dev); ++ if (intel_fbc_enabled(dev)) { ++ DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); ++ intel_disable_fbc(dev); ++ } + } + + static int +@@ -1381,7 +1396,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, + Start = obj_priv->gtt_offset; + Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); + +- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); ++ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", ++ Start, Offset, x, y, crtc->fb->pitch); + I915_WRITE(dspstride, crtc->fb->pitch); + if (IS_I965G(dev)) { + I915_WRITE(dspbase, Offset); +@@ -1510,6 +1526,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) + udelay(500); + } + ++/* The FDI link training functions for ILK/Ibexpeak. */ ++static void ironlake_fdi_link_train(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; ++ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; ++ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; ++ int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; ++ u32 temp, tries = 0; ++ ++ /* enable CPU FDI TX and PCH FDI RX */ ++ temp = I915_READ(fdi_tx_reg); ++ temp |= FDI_TX_ENABLE; ++ temp &= ~(7 << 19); ++ temp |= (intel_crtc->fdi_lanes - 1) << 19; ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ I915_WRITE(fdi_tx_reg, temp); ++ I915_READ(fdi_tx_reg); ++ ++ temp = I915_READ(fdi_rx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); ++ I915_READ(fdi_rx_reg); ++ udelay(150); ++ ++ /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit ++ for train result */ ++ temp = I915_READ(fdi_rx_imr_reg); ++ temp &= ~FDI_RX_SYMBOL_LOCK; ++ temp &= ~FDI_RX_BIT_LOCK; ++ I915_WRITE(fdi_rx_imr_reg, temp); ++ I915_READ(fdi_rx_imr_reg); ++ udelay(150); ++ ++ for (;;) { ++ temp = I915_READ(fdi_rx_iir_reg); ++ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); ++ ++ if ((temp & FDI_RX_BIT_LOCK)) { ++ DRM_DEBUG_KMS("FDI train 1 done.\n"); ++ I915_WRITE(fdi_rx_iir_reg, ++ temp | FDI_RX_BIT_LOCK); ++ break; ++ } ++ ++ tries++; ++ ++ if (tries > 5) { ++ DRM_DEBUG_KMS("FDI train 1 fail!\n"); ++ break; ++ } ++ } ++ ++ /* Train 2 */ ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_2; ++ I915_WRITE(fdi_tx_reg, temp); ++ ++ temp = I915_READ(fdi_rx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_2; ++ I915_WRITE(fdi_rx_reg, temp); ++ udelay(150); ++ ++ tries = 0; ++ ++ for (;;) { ++ temp = I915_READ(fdi_rx_iir_reg); ++ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); ++ ++ if (temp & FDI_RX_SYMBOL_LOCK) { ++ I915_WRITE(fdi_rx_iir_reg, ++ temp | FDI_RX_SYMBOL_LOCK); ++ DRM_DEBUG_KMS("FDI train 2 done.\n"); ++ break; ++ } ++ ++ tries++; ++ ++ if (tries > 5) { ++ DRM_DEBUG_KMS("FDI train 2 fail!\n"); ++ break; ++ } ++ } ++ ++ DRM_DEBUG_KMS("FDI train done\n"); ++} ++ ++static int snb_b_fdi_train_param [] = { ++ FDI_LINK_TRAIN_400MV_0DB_SNB_B, ++ 
FDI_LINK_TRAIN_400MV_6DB_SNB_B, ++ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, ++ FDI_LINK_TRAIN_800MV_0DB_SNB_B, ++}; ++ ++/* The FDI link training functions for SNB/Cougarpoint. */ ++static void gen6_fdi_link_train(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; ++ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; ++ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; ++ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; ++ u32 temp, i; ++ ++ /* enable CPU FDI TX and PCH FDI RX */ ++ temp = I915_READ(fdi_tx_reg); ++ temp |= FDI_TX_ENABLE; ++ temp &= ~(7 << 19); ++ temp |= (intel_crtc->fdi_lanes - 1) << 19; ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; ++ /* SNB-B */ ++ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; ++ I915_WRITE(fdi_tx_reg, temp); ++ I915_READ(fdi_tx_reg); ++ ++ temp = I915_READ(fdi_rx_reg); ++ if (HAS_PCH_CPT(dev)) { ++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; ++ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; ++ } else { ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ } ++ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); ++ I915_READ(fdi_rx_reg); ++ udelay(150); ++ ++ /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit ++ for train result */ ++ temp = I915_READ(fdi_rx_imr_reg); ++ temp &= ~FDI_RX_SYMBOL_LOCK; ++ temp &= ~FDI_RX_BIT_LOCK; ++ I915_WRITE(fdi_rx_imr_reg, temp); ++ I915_READ(fdi_rx_imr_reg); ++ udelay(150); ++ ++ for (i = 0; i < 4; i++ ) { ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; ++ temp |= snb_b_fdi_train_param[i]; ++ I915_WRITE(fdi_tx_reg, temp); ++ udelay(500); ++ ++ temp = I915_READ(fdi_rx_iir_reg); ++ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); ++ ++ if (temp & FDI_RX_BIT_LOCK) 
{ ++ I915_WRITE(fdi_rx_iir_reg, ++ temp | FDI_RX_BIT_LOCK); ++ DRM_DEBUG_KMS("FDI train 1 done.\n"); ++ break; ++ } ++ } ++ if (i == 4) ++ DRM_DEBUG_KMS("FDI train 1 fail!\n"); ++ ++ /* Train 2 */ ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_2; ++ if (IS_GEN6(dev)) { ++ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; ++ /* SNB-B */ ++ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; ++ } ++ I915_WRITE(fdi_tx_reg, temp); ++ ++ temp = I915_READ(fdi_rx_reg); ++ if (HAS_PCH_CPT(dev)) { ++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; ++ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; ++ } else { ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_2; ++ } ++ I915_WRITE(fdi_rx_reg, temp); ++ udelay(150); ++ ++ for (i = 0; i < 4; i++ ) { ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; ++ temp |= snb_b_fdi_train_param[i]; ++ I915_WRITE(fdi_tx_reg, temp); ++ udelay(500); ++ ++ temp = I915_READ(fdi_rx_iir_reg); ++ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); ++ ++ if (temp & FDI_RX_SYMBOL_LOCK) { ++ I915_WRITE(fdi_rx_iir_reg, ++ temp | FDI_RX_SYMBOL_LOCK); ++ DRM_DEBUG_KMS("FDI train 2 done.\n"); ++ break; ++ } ++ } ++ if (i == 4) ++ DRM_DEBUG_KMS("FDI train 2 fail!\n"); ++ ++ DRM_DEBUG_KMS("FDI train done.\n"); ++} ++ + static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + { + struct drm_device *dev = crtc->dev; +@@ -1523,8 +1752,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; + int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; + int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; +- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; +- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; + int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; + int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; + int pf_win_size = (pipe == 0) ? 
PFA_WIN_SZ : PFB_WIN_SZ; +@@ -1541,8 +1768,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; + int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; + int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; ++ int trans_dpll_sel = (pipe == 0) ? 0 : 1; + u32 temp; +- int tries = 5, j, n; ++ int n; + u32 pipe_bpc; + + temp = I915_READ(pipeconf_reg); +@@ -1569,12 +1797,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + /* enable eDP PLL */ + ironlake_enable_pll_edp(crtc); + } else { +- /* enable PCH DPLL */ +- temp = I915_READ(pch_dpll_reg); +- if ((temp & DPLL_VCO_ENABLE) == 0) { +- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); +- I915_READ(pch_dpll_reg); +- } + + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + temp = I915_READ(fdi_rx_reg); +@@ -1584,9 +1806,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + */ + temp &= ~(0x7 << 16); + temp |= (pipe_bpc << 11); +- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | +- FDI_SEL_PCDCLK | +- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ ++ temp &= ~(7 << 19); ++ temp |= (intel_crtc->fdi_lanes - 1) << 19; ++ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); ++ I915_READ(fdi_rx_reg); ++ udelay(200); ++ ++ /* Switch from Rawclk to PCDclk */ ++ temp = I915_READ(fdi_rx_reg); ++ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); + I915_READ(fdi_rx_reg); + udelay(200); + +@@ -1629,91 +1857,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + } + + if (!HAS_eDP) { +- /* enable CPU FDI TX and PCH FDI RX */ +- temp = I915_READ(fdi_tx_reg); +- temp |= FDI_TX_ENABLE; +- temp |= FDI_DP_PORT_WIDTH_X4; /* default */ +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_1; +- I915_WRITE(fdi_tx_reg, temp); +- I915_READ(fdi_tx_reg); +- +- temp = I915_READ(fdi_rx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_1; +- 
I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); +- I915_READ(fdi_rx_reg); +- +- udelay(150); +- +- /* Train FDI. */ +- /* umask FDI RX Interrupt symbol_lock and bit_lock bit +- for train result */ +- temp = I915_READ(fdi_rx_imr_reg); +- temp &= ~FDI_RX_SYMBOL_LOCK; +- temp &= ~FDI_RX_BIT_LOCK; +- I915_WRITE(fdi_rx_imr_reg, temp); +- I915_READ(fdi_rx_imr_reg); +- udelay(150); ++ /* For PCH output, training FDI link */ ++ if (IS_GEN6(dev)) ++ gen6_fdi_link_train(crtc); ++ else ++ ironlake_fdi_link_train(crtc); + +- temp = I915_READ(fdi_rx_iir_reg); +- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); +- +- if ((temp & FDI_RX_BIT_LOCK) == 0) { +- for (j = 0; j < tries; j++) { +- temp = I915_READ(fdi_rx_iir_reg); +- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", +- temp); +- if (temp & FDI_RX_BIT_LOCK) +- break; +- udelay(200); +- } +- if (j != tries) +- I915_WRITE(fdi_rx_iir_reg, +- temp | FDI_RX_BIT_LOCK); +- else +- DRM_DEBUG_KMS("train 1 fail\n"); +- } else { +- I915_WRITE(fdi_rx_iir_reg, +- temp | FDI_RX_BIT_LOCK); +- DRM_DEBUG_KMS("train 1 ok 2!\n"); ++ /* enable PCH DPLL */ ++ temp = I915_READ(pch_dpll_reg); ++ if ((temp & DPLL_VCO_ENABLE) == 0) { ++ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); ++ I915_READ(pch_dpll_reg); + } +- temp = I915_READ(fdi_tx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_2; +- I915_WRITE(fdi_tx_reg, temp); +- +- temp = I915_READ(fdi_rx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_2; +- I915_WRITE(fdi_rx_reg, temp); +- +- udelay(150); ++ udelay(200); + +- temp = I915_READ(fdi_rx_iir_reg); +- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); +- +- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { +- for (j = 0; j < tries; j++) { +- temp = I915_READ(fdi_rx_iir_reg); +- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", +- temp); +- if (temp & FDI_RX_SYMBOL_LOCK) +- break; +- udelay(200); +- } +- if (j != tries) { +- I915_WRITE(fdi_rx_iir_reg, +- temp | FDI_RX_SYMBOL_LOCK); +- DRM_DEBUG_KMS("train 2 ok 1!\n"); +- } else +- 
DRM_DEBUG_KMS("train 2 fail\n"); +- } else { +- I915_WRITE(fdi_rx_iir_reg, +- temp | FDI_RX_SYMBOL_LOCK); +- DRM_DEBUG_KMS("train 2 ok 2!\n"); ++ if (HAS_PCH_CPT(dev)) { ++ /* Be sure PCH DPLL SEL is set */ ++ temp = I915_READ(PCH_DPLL_SEL); ++ if (trans_dpll_sel == 0 && ++ (temp & TRANSA_DPLL_ENABLE) == 0) ++ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); ++ else if (trans_dpll_sel == 1 && ++ (temp & TRANSB_DPLL_ENABLE) == 0) ++ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); ++ I915_WRITE(PCH_DPLL_SEL, temp); ++ I915_READ(PCH_DPLL_SEL); + } +- DRM_DEBUG_KMS("train done\n"); + + /* set transcoder timing */ + I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); +@@ -1724,6 +1893,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); + I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + ++ /* enable normal train */ ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | ++ FDI_TX_ENHANCE_FRAME_ENABLE); ++ I915_READ(fdi_tx_reg); ++ ++ temp = I915_READ(fdi_rx_reg); ++ if (HAS_PCH_CPT(dev)) { ++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; ++ temp |= FDI_LINK_TRAIN_NORMAL_CPT; ++ } else { ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_NONE; ++ } ++ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); ++ I915_READ(fdi_rx_reg); ++ ++ /* wait one idle pattern time */ ++ udelay(100); ++ ++ /* For PCH DP, enable TRANS_DP_CTL */ ++ if (HAS_PCH_CPT(dev) && ++ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { ++ int trans_dp_ctl = (pipe == 0) ? 
TRANS_DP_CTL_A : TRANS_DP_CTL_B; ++ int reg; ++ ++ reg = I915_READ(trans_dp_ctl); ++ reg &= ~TRANS_DP_PORT_SEL_MASK; ++ reg = TRANS_DP_OUTPUT_ENABLE | ++ TRANS_DP_ENH_FRAMING | ++ TRANS_DP_VSYNC_ACTIVE_HIGH | ++ TRANS_DP_HSYNC_ACTIVE_HIGH; ++ ++ switch (intel_trans_dp_port_sel(crtc)) { ++ case PCH_DP_B: ++ reg |= TRANS_DP_PORT_SEL_B; ++ break; ++ case PCH_DP_C: ++ reg |= TRANS_DP_PORT_SEL_C; ++ break; ++ case PCH_DP_D: ++ reg |= TRANS_DP_PORT_SEL_D; ++ break; ++ default: ++ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); ++ reg |= TRANS_DP_PORT_SEL_B; ++ break; ++ } ++ ++ I915_WRITE(trans_dp_ctl, reg); ++ POSTING_READ(trans_dp_ctl); ++ } ++ + /* enable PCH transcoder */ + temp = I915_READ(transconf_reg); + /* +@@ -1738,23 +1961,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) + ; + +- /* enable normal */ +- +- temp = I915_READ(fdi_tx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | +- FDI_TX_ENHANCE_FRAME_ENABLE); +- I915_READ(fdi_tx_reg); +- +- temp = I915_READ(fdi_rx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | +- FDI_RX_ENHANCE_FRAME_ENABLE); +- I915_READ(fdi_rx_reg); +- +- /* wait one idle pattern time */ +- udelay(100); +- + } + + intel_crtc_load_lut(crtc); +@@ -1805,6 +2011,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + I915_READ(pf_ctl_reg); + } + I915_WRITE(pf_win_size, 0); ++ POSTING_READ(pf_win_size); ++ + + /* disable CPU FDI tx and PCH FDI rx */ + temp = I915_READ(fdi_tx_reg); +@@ -1825,11 +2033,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_tx_reg, temp); ++ POSTING_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_1; ++ if (HAS_PCH_CPT(dev)) { ++ temp &= 
~FDI_LINK_TRAIN_PATTERN_MASK_CPT; ++ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; ++ } else { ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ } + I915_WRITE(fdi_rx_reg, temp); ++ POSTING_READ(fdi_rx_reg); + + udelay(100); + +@@ -1859,6 +2074,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + } + } + } ++ + temp = I915_READ(transconf_reg); + /* BPC in transcoder is consistent with that in pipeconf */ + temp &= ~PIPE_BPC_MASK; +@@ -1867,35 +2083,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + I915_READ(transconf_reg); + udelay(100); + ++ if (HAS_PCH_CPT(dev)) { ++ /* disable TRANS_DP_CTL */ ++ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; ++ int reg; ++ ++ reg = I915_READ(trans_dp_ctl); ++ reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); ++ I915_WRITE(trans_dp_ctl, reg); ++ POSTING_READ(trans_dp_ctl); ++ ++ /* disable DPLL_SEL */ ++ temp = I915_READ(PCH_DPLL_SEL); ++ if (trans_dpll_sel == 0) ++ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); ++ else ++ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); ++ I915_WRITE(PCH_DPLL_SEL, temp); ++ I915_READ(PCH_DPLL_SEL); ++ ++ } ++ + /* disable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); +- if ((temp & DPLL_VCO_ENABLE) != 0) { +- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); +- I915_READ(pch_dpll_reg); +- } ++ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); ++ I915_READ(pch_dpll_reg); + + if (HAS_eDP) { + ironlake_disable_pll_edp(crtc); + } + ++ /* Switch from PCDclk to Rawclk */ + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_SEL_PCDCLK; + I915_WRITE(fdi_rx_reg, temp); + I915_READ(fdi_rx_reg); + ++ /* Disable CPU FDI TX PLL */ ++ temp = I915_READ(fdi_tx_reg); ++ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); ++ I915_READ(fdi_tx_reg); ++ udelay(100); ++ + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_RX_PLL_ENABLE; + I915_WRITE(fdi_rx_reg, temp); + I915_READ(fdi_rx_reg); + +- /* Disable CPU FDI TX PLL */ +- temp = 
I915_READ(fdi_tx_reg); +- if ((temp & FDI_TX_PLL_ENABLE) != 0) { +- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); +- I915_READ(fdi_tx_reg); +- udelay(100); +- } +- + /* Wait for the clocks to turn off. */ + udelay(100); + break; +@@ -2122,6 +2356,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, + if (mode->clock * 3 > 27000 * 4) + return MODE_CLOCK_HIGH; + } ++ ++ drm_mode_set_crtcinfo(adjusted_mode, 0); + return true; + } + +@@ -2331,6 +2567,30 @@ static struct intel_watermark_params i830_wm_info = { + I830_FIFO_LINE_SIZE + }; + ++static struct intel_watermark_params ironlake_display_wm_info = { ++ ILK_DISPLAY_FIFO, ++ ILK_DISPLAY_MAXWM, ++ ILK_DISPLAY_DFTWM, ++ 2, ++ ILK_FIFO_LINE_SIZE ++}; ++ ++static struct intel_watermark_params ironlake_display_srwm_info = { ++ ILK_DISPLAY_SR_FIFO, ++ ILK_DISPLAY_MAX_SRWM, ++ ILK_DISPLAY_DFT_SRWM, ++ 2, ++ ILK_FIFO_LINE_SIZE ++}; ++ ++static struct intel_watermark_params ironlake_cursor_srwm_info = { ++ ILK_CURSOR_SR_FIFO, ++ ILK_CURSOR_MAX_SRWM, ++ ILK_CURSOR_DFT_SRWM, ++ 2, ++ ILK_FIFO_LINE_SIZE ++}; ++ + /** + * intel_calculate_wm - calculate watermark level + * @clock_in_khz: pixel clock +@@ -2382,6 +2642,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, + + struct cxsr_latency { + int is_desktop; ++ int is_ddr3; + unsigned long fsb_freq; + unsigned long mem_freq; + unsigned long display_sr; +@@ -2391,33 +2652,45 @@ struct cxsr_latency { + }; + + static struct cxsr_latency cxsr_latency_table[] = { +- {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ +- {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ +- {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ +- +- {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ +- {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ +- {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ +- +- {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ +- {1, 400, 667, 3443, 33443, 3996, 33996}, /* 
DDR2-667 SC */ +- {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ +- +- {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ +- {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ +- {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ +- +- {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ +- {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ +- {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ +- +- {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ +- {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ +- {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ ++ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ ++ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ ++ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ ++ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ ++ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ ++ ++ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ ++ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ ++ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ ++ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ ++ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ ++ ++ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ ++ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ ++ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ ++ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ ++ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ ++ ++ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ ++ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ ++ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ ++ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ ++ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ ++ ++ {0, 0, 667, 400, 3456, 33456, 4103, 
34106}, /* DDR2-400 SC */ ++ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ ++ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ ++ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ ++ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ ++ ++ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ ++ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ ++ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ ++ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ ++ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ + }; + +-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, +- int mem) ++static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, ++ int fsb, int mem) + { + int i; + struct cxsr_latency *latency; +@@ -2428,6 +2701,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, + for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { + latency = &cxsr_latency_table[i]; + if (is_desktop == latency->is_desktop && ++ is_ddr3 == latency->is_ddr3 && + fsb == latency->fsb_freq && mem == latency->mem_freq) + return latency; + } +@@ -2449,66 +2723,6 @@ static void pineview_disable_cxsr(struct drm_device *dev) + DRM_INFO("Big FIFO is disabled\n"); + } + +-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, +- int pixel_size) +-{ +- struct drm_i915_private *dev_priv = dev->dev_private; +- u32 reg; +- unsigned long wm; +- struct cxsr_latency *latency; +- +- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, +- dev_priv->mem_freq); +- if (!latency) { +- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); +- pineview_disable_cxsr(dev); +- return; +- } +- +- /* Display SR */ +- wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size, +- latency->display_sr); +- reg = I915_READ(DSPFW1); +- reg &= 0x7fffff; +- reg |= wm << 23; +- I915_WRITE(DSPFW1, reg); 
+- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); +- +- /* cursor SR */ +- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size, +- latency->cursor_sr); +- reg = I915_READ(DSPFW3); +- reg &= ~(0x3f << 24); +- reg |= (wm & 0x3f) << 24; +- I915_WRITE(DSPFW3, reg); +- +- /* Display HPLL off SR */ +- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, +- latency->display_hpll_disable, I915_FIFO_LINE_SIZE); +- reg = I915_READ(DSPFW3); +- reg &= 0xfffffe00; +- reg |= wm & 0x1ff; +- I915_WRITE(DSPFW3, reg); +- +- /* cursor HPLL off SR */ +- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size, +- latency->cursor_hpll_disable); +- reg = I915_READ(DSPFW3); +- reg &= ~(0x3f << 16); +- reg |= (wm & 0x3f) << 16; +- I915_WRITE(DSPFW3, reg); +- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); +- +- /* activate cxsr */ +- reg = I915_READ(DSPFW3); +- reg |= PINEVIEW_SELF_REFRESH_EN; +- I915_WRITE(DSPFW3, reg); +- +- DRM_INFO("Big FIFO is enabled\n"); +- +- return; +-} +- + /* + * Latency for FIFO fetches is dependent on several factors: + * - memory configuration (speed, channels) +@@ -2593,6 +2807,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) + return size; + } + ++static void pineview_update_wm(struct drm_device *dev, int planea_clock, ++ int planeb_clock, int sr_hdisplay, int pixel_size) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 reg; ++ unsigned long wm; ++ struct cxsr_latency *latency; ++ int sr_clock; ++ ++ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, ++ dev_priv->fsb_freq, dev_priv->mem_freq); ++ if (!latency) { ++ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); ++ pineview_disable_cxsr(dev); ++ return; ++ } ++ ++ if (!planea_clock || !planeb_clock) { ++ sr_clock = planea_clock ? 
planea_clock : planeb_clock; ++ ++ /* Display SR */ ++ wm = intel_calculate_wm(sr_clock, &pineview_display_wm, ++ pixel_size, latency->display_sr); ++ reg = I915_READ(DSPFW1); ++ reg &= ~DSPFW_SR_MASK; ++ reg |= wm << DSPFW_SR_SHIFT; ++ I915_WRITE(DSPFW1, reg); ++ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); ++ ++ /* cursor SR */ ++ wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, ++ pixel_size, latency->cursor_sr); ++ reg = I915_READ(DSPFW3); ++ reg &= ~DSPFW_CURSOR_SR_MASK; ++ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; ++ I915_WRITE(DSPFW3, reg); ++ ++ /* Display HPLL off SR */ ++ wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, ++ pixel_size, latency->display_hpll_disable); ++ reg = I915_READ(DSPFW3); ++ reg &= ~DSPFW_HPLL_SR_MASK; ++ reg |= wm & DSPFW_HPLL_SR_MASK; ++ I915_WRITE(DSPFW3, reg); ++ ++ /* cursor HPLL off SR */ ++ wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, ++ pixel_size, latency->cursor_hpll_disable); ++ reg = I915_READ(DSPFW3); ++ reg &= ~DSPFW_HPLL_CURSOR_MASK; ++ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; ++ I915_WRITE(DSPFW3, reg); ++ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); ++ ++ /* activate cxsr */ ++ reg = I915_READ(DSPFW3); ++ reg |= PINEVIEW_SELF_REFRESH_EN; ++ I915_WRITE(DSPFW3, reg); ++ DRM_DEBUG_KMS("Self-refresh is enabled\n"); ++ } else { ++ pineview_disable_cxsr(dev); ++ DRM_DEBUG_KMS("Self-refresh is disabled\n"); ++ } ++} ++ + static void g4x_update_wm(struct drm_device *dev, int planea_clock, + int planeb_clock, int sr_hdisplay, int pixel_size) + { +@@ -2813,6 +3092,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, + I915_WRITE(FW_BLC, fwater_lo); + } + ++#define ILK_LP0_PLANE_LATENCY 700 ++ ++static void ironlake_update_wm(struct drm_device *dev, int planea_clock, ++ int planeb_clock, int sr_hdisplay, int pixel_size) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int planea_wm, planeb_wm, cursora_wm, cursorb_wm; ++ int 
sr_wm, cursor_wm; ++ unsigned long line_time_us; ++ int sr_clock, entries_required; ++ u32 reg_value; ++ ++ /* Calculate and update the watermark for plane A */ ++ if (planea_clock) { ++ entries_required = ((planea_clock / 1000) * pixel_size * ++ ILK_LP0_PLANE_LATENCY) / 1000; ++ entries_required = DIV_ROUND_UP(entries_required, ++ ironlake_display_wm_info.cacheline_size); ++ planea_wm = entries_required + ++ ironlake_display_wm_info.guard_size; ++ ++ if (planea_wm > (int)ironlake_display_wm_info.max_wm) ++ planea_wm = ironlake_display_wm_info.max_wm; ++ ++ cursora_wm = 16; ++ reg_value = I915_READ(WM0_PIPEA_ILK); ++ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); ++ reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | ++ (cursora_wm & WM0_PIPE_CURSOR_MASK); ++ I915_WRITE(WM0_PIPEA_ILK, reg_value); ++ DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, " ++ "cursor: %d\n", planea_wm, cursora_wm); ++ } ++ /* Calculate and update the watermark for plane B */ ++ if (planeb_clock) { ++ entries_required = ((planeb_clock / 1000) * pixel_size * ++ ILK_LP0_PLANE_LATENCY) / 1000; ++ entries_required = DIV_ROUND_UP(entries_required, ++ ironlake_display_wm_info.cacheline_size); ++ planeb_wm = entries_required + ++ ironlake_display_wm_info.guard_size; ++ ++ if (planeb_wm > (int)ironlake_display_wm_info.max_wm) ++ planeb_wm = ironlake_display_wm_info.max_wm; ++ ++ cursorb_wm = 16; ++ reg_value = I915_READ(WM0_PIPEB_ILK); ++ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); ++ reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | ++ (cursorb_wm & WM0_PIPE_CURSOR_MASK); ++ I915_WRITE(WM0_PIPEB_ILK, reg_value); ++ DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " ++ "cursor: %d\n", planeb_wm, cursorb_wm); ++ } ++ ++ /* ++ * Calculate and update the self-refresh watermark only when one ++ * display plane is used. ++ */ ++ if (!planea_clock || !planeb_clock) { ++ int line_count; ++ /* Read the self-refresh latency. 
The unit is 0.5us */ ++ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; ++ ++ sr_clock = planea_clock ? planea_clock : planeb_clock; ++ line_time_us = ((sr_hdisplay * 1000) / sr_clock); ++ ++ /* Use ns/us then divide to preserve precision */ ++ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) ++ / 1000; ++ ++ /* calculate the self-refresh watermark for display plane */ ++ entries_required = line_count * sr_hdisplay * pixel_size; ++ entries_required = DIV_ROUND_UP(entries_required, ++ ironlake_display_srwm_info.cacheline_size); ++ sr_wm = entries_required + ++ ironlake_display_srwm_info.guard_size; ++ ++ /* calculate the self-refresh watermark for display cursor */ ++ entries_required = line_count * pixel_size * 64; ++ entries_required = DIV_ROUND_UP(entries_required, ++ ironlake_cursor_srwm_info.cacheline_size); ++ cursor_wm = entries_required + ++ ironlake_cursor_srwm_info.guard_size; ++ ++ /* configure watermark and enable self-refresh */ ++ reg_value = I915_READ(WM1_LP_ILK); ++ reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | ++ WM1_LP_CURSOR_MASK); ++ reg_value |= WM1_LP_SR_EN | ++ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | ++ (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; ++ ++ I915_WRITE(WM1_LP_ILK, reg_value); ++ DRM_DEBUG_KMS("self-refresh watermark: display plane %d " ++ "cursor %d\n", sr_wm, cursor_wm); ++ ++ } else { ++ /* Turn off self refresh if both pipes are enabled */ ++ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); ++ } ++} + /** + * intel_update_watermarks - update FIFO watermark values based on current modes + * +@@ -2882,12 +3263,6 @@ static void intel_update_watermarks(struct drm_device *dev) + if (enabled <= 0) + return; + +- /* Single plane configs can enable self refresh */ +- if (enabled == 1 && IS_PINEVIEW(dev)) +- pineview_enable_cxsr(dev, sr_clock, pixel_size); +- else if (IS_PINEVIEW(dev)) +- pineview_disable_cxsr(dev); +- + dev_priv->display.update_wm(dev, planea_clock, planeb_clock, + sr_hdisplay, 
pixel_size); + } +@@ -2924,7 +3299,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + bool is_edp = false; + struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; ++ struct intel_encoder *intel_encoder = NULL; + const intel_limit_t *limit; + int ret; + struct fdi_m_n m_n = {0}; +@@ -2935,6 +3311,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; + int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; + int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; ++ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; ++ int trans_dpll_sel = (pipe == 0) ? 0 : 1; + int lvds_reg = LVDS; + u32 temp; + int sdvo_pixel_multiply; +@@ -2942,12 +3320,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + + drm_vblank_pre_modeset(dev, pipe); + +- list_for_each_entry(connector, &mode_config->connector_list, head) { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ list_for_each_entry(encoder, &mode_config->encoder_list, head) { + +- if (!connector->encoder || connector->encoder->crtc != crtc) ++ if (!encoder || encoder->crtc != crtc) + continue; + ++ intel_encoder = enc_to_intel_encoder(encoder); ++ + switch (intel_encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; +@@ -3043,14 +3422,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + + /* FDI link */ + if (HAS_PCH_SPLIT(dev)) { +- int lane, link_bw, bpp; ++ int lane = 0, link_bw, bpp; + /* eDP doesn't require FDI link, so just set DP M/N + according to current link config */ + if (is_edp) { +- struct drm_connector *edp; + target_clock = mode->clock; +- edp = intel_pipe_get_connector(crtc); +- intel_edp_link_config(to_intel_encoder(edp), ++ intel_edp_link_config(intel_encoder, + &lane, &link_bw); + } else { + /* DP over FDI requires target mode clock +@@ -3059,7 
+3436,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + target_clock = mode->clock; + else + target_clock = adjusted_mode->clock; +- lane = 4; + link_bw = 270000; + } + +@@ -3111,6 +3487,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + bpp = 24; + } + ++ if (!lane) { ++ /* ++ * Account for spread spectrum to avoid ++ * oversubscribing the link. Max center spread ++ * is 2.5%; use 5% for safety's sake. ++ */ ++ u32 bps = target_clock * bpp * 21 / 20; ++ lane = bps / (link_bw * 8) + 1; ++ } ++ ++ intel_crtc->fdi_lanes = lane; ++ + ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); + } + +@@ -3292,6 +3680,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + udelay(150); + } + ++ /* enable transcoder DPLL */ ++ if (HAS_PCH_CPT(dev)) { ++ temp = I915_READ(PCH_DPLL_SEL); ++ if (trans_dpll_sel == 0) ++ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); ++ else ++ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); ++ I915_WRITE(PCH_DPLL_SEL, temp); ++ I915_READ(PCH_DPLL_SEL); ++ udelay(150); ++ } ++ + /* The LVDS pin pair needs to be on before the DPLLs are enabled. + * This is an exception to the general rule that mode_set doesn't turn + * things on. 
+@@ -3303,7 +3703,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + lvds_reg = PCH_LVDS; + + lvds = I915_READ(lvds_reg); +- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; ++ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; ++ if (pipe == 1) { ++ if (HAS_PCH_CPT(dev)) ++ lvds |= PORT_TRANS_B_SEL_CPT; ++ else ++ lvds |= LVDS_PIPEB_SELECT; ++ } else { ++ if (HAS_PCH_CPT(dev)) ++ lvds &= ~PORT_TRANS_SEL_MASK; ++ else ++ lvds &= ~LVDS_PIPEB_SELECT; ++ } + /* set the corresponsding LVDS_BORDER bit */ + lvds |= dev_priv->lvds_border_bits; + /* Set the B0-B3 data pairs corresponding to whether we're going to +@@ -3321,14 +3732,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + /* set the dithering flag */ + if (IS_I965G(dev)) { + if (dev_priv->lvds_dither) { +- if (HAS_PCH_SPLIT(dev)) ++ if (HAS_PCH_SPLIT(dev)) { + pipeconf |= PIPE_ENABLE_DITHER; +- else ++ pipeconf |= PIPE_DITHER_TYPE_ST01; ++ } else + lvds |= LVDS_ENABLE_DITHER; + } else { +- if (HAS_PCH_SPLIT(dev)) ++ if (HAS_PCH_SPLIT(dev)) { + pipeconf &= ~PIPE_ENABLE_DITHER; +- else ++ pipeconf &= ~PIPE_DITHER_TYPE_MASK; ++ } else + lvds &= ~LVDS_ENABLE_DITHER; + } + } +@@ -3337,6 +3750,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + } + if (is_dp) + intel_dp_set_m_n(crtc, mode, adjusted_mode); ++ else if (HAS_PCH_SPLIT(dev)) { ++ /* For non-DP output, clear any trans DP clock recovery setting.*/ ++ if (pipe == 0) { ++ I915_WRITE(TRANSA_DATA_M1, 0); ++ I915_WRITE(TRANSA_DATA_N1, 0); ++ I915_WRITE(TRANSA_DP_LINK_M1, 0); ++ I915_WRITE(TRANSA_DP_LINK_N1, 0); ++ } else { ++ I915_WRITE(TRANSB_DATA_M1, 0); ++ I915_WRITE(TRANSB_DATA_N1, 0); ++ I915_WRITE(TRANSB_DP_LINK_M1, 0); ++ I915_WRITE(TRANSB_DP_LINK_N1, 0); ++ } ++ } + + if (!is_edp) { + I915_WRITE(fp_reg, fp); +@@ -3377,6 +3804,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + } + } + ++ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { ++ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; ++ /* 
the chip adds 2 halflines automatically */ ++ adjusted_mode->crtc_vdisplay -= 1; ++ adjusted_mode->crtc_vtotal -= 1; ++ adjusted_mode->crtc_vblank_start -= 1; ++ adjusted_mode->crtc_vblank_end -= 1; ++ adjusted_mode->crtc_vsync_end -= 1; ++ adjusted_mode->crtc_vsync_start -= 1; ++ } else ++ pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ ++ + I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | + ((adjusted_mode->crtc_htotal - 1) << 16)); + I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | +@@ -3411,6 +3850,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + /* enable FDI RX PLL too */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); ++ I915_READ(fdi_rx_reg); ++ udelay(200); ++ ++ /* enable FDI TX PLL too */ ++ temp = I915_READ(fdi_tx_reg); ++ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); ++ I915_READ(fdi_tx_reg); ++ ++ /* enable FDI RX PCDCLK */ ++ temp = I915_READ(fdi_rx_reg); ++ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); ++ I915_READ(fdi_rx_reg); + udelay(200); + } + } +@@ -3527,6 +3978,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, + DRM_ERROR("failed to pin cursor bo\n"); + goto fail_locked; + } ++ ++ ret = i915_gem_object_set_to_gtt_domain(bo, 0); ++ if (ret) { ++ DRM_ERROR("failed to move cursor bo into the GTT\n"); ++ goto fail_unpin; ++ } ++ + addr = obj_priv->gtt_offset; + } else { + ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); +@@ -3570,6 +4028,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, + intel_crtc->cursor_bo = bo; + + return 0; ++fail_unpin: ++ i915_gem_object_unpin(bo); + fail_locked: + mutex_unlock(&dev->struct_mutex); + fail: +@@ -3671,6 +4131,7 @@ static struct drm_display_mode load_detect_mode = { + }; + + struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, ++ struct drm_connector *connector, + struct drm_display_mode *mode, + int *dpms_mode) + { +@@ -3729,7 +4190,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, + } + + encoder->crtc = crtc; +- intel_encoder->base.encoder = encoder; ++ connector->encoder = encoder; + intel_encoder->load_detect_temp = true; + + intel_crtc = to_intel_crtc(crtc); +@@ -3755,7 +4216,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, + return crtc; + } + +-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) ++void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, ++ struct drm_connector *connector, int dpms_mode) + { + struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_device *dev = encoder->dev; +@@ -3765,7 +4227,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm + + if (intel_encoder->load_detect_temp) { + encoder->crtc = NULL; +- intel_encoder->base.encoder = NULL; ++ connector->encoder = NULL; + intel_encoder->load_detect_temp = false; + crtc->enabled = drm_helper_crtc_in_use(crtc); + drm_helper_disable_unused_functions(dev); +@@ -4027,6 +4489,8 @@ static void intel_idle_update(struct work_struct *work) + + mutex_lock(&dev->struct_mutex); + ++ i915_update_gfx_val(dev_priv); ++ + if (IS_I945G(dev) || IS_I945GM(dev)) { + DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); +@@ -4155,12 +4619,6 @@ void 
intel_finish_page_flip(struct drm_device *dev, int pipe) + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + if (work == NULL || !work->pending) { +- if (work && !work->pending) { +- obj_priv = to_intel_bo(work->pending_flip_obj); +- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", +- obj_priv, +- atomic_read(&obj_priv->pending_flip)); +- } + spin_unlock_irqrestore(&dev->event_lock, flags); + return; + } +@@ -4220,14 +4678,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, + unsigned long flags; + int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; + int ret, pipesrc; +- RING_LOCALS; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) + return -ENOMEM; + +- mutex_lock(&dev->struct_mutex); +- + work->event = event; + work->dev = crtc->dev; + intel_fb = to_intel_framebuffer(crtc->fb); +@@ -4237,10 +4692,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, + /* We borrow the event spin lock for protecting unpin_work */ + spin_lock_irqsave(&dev->event_lock, flags); + if (intel_crtc->unpin_work) { +- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(work); +- mutex_unlock(&dev->struct_mutex); ++ ++ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + return -EBUSY; + } + intel_crtc->unpin_work = work; +@@ -4249,13 +4704,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + ++ mutex_lock(&dev->struct_mutex); + ret = intel_pin_and_fence_fb_obj(dev, obj); + if (ret != 0) { +- DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", +- to_intel_bo(obj)); +- kfree(work); +- intel_crtc->unpin_work = NULL; + mutex_unlock(&dev->struct_mutex); ++ ++ spin_lock_irqsave(&dev->event_lock, flags); ++ intel_crtc->unpin_work = NULL; ++ spin_unlock_irqrestore(&dev->event_lock, flags); ++ ++ kfree(work); ++ ++ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", ++ 
to_intel_bo(obj)); + return ret; + } + +@@ -4392,14 +4853,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) + return crtc; + } + +-static int intel_connector_clones(struct drm_device *dev, int type_mask) ++static int intel_encoder_clones(struct drm_device *dev, int type_mask) + { + int index_mask = 0; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + int entry = 0; + +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + if (type_mask & intel_encoder->clone_mask) + index_mask |= (1 << entry); + entry++; +@@ -4411,7 +4872,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) + static void intel_setup_outputs(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + + intel_crt_init(dev); + +@@ -4426,9 +4887,8 @@ static void intel_setup_outputs(struct drm_device *dev) + intel_dp_init(dev, DP_A); + + if (I915_READ(HDMIB) & PORT_DETECTED) { +- /* check SDVOB */ +- /* found = intel_sdvo_init(dev, HDMIB); */ +- found = 0; ++ /* PCH SDVOB multiplex with HDMIB */ ++ found = intel_sdvo_init(dev, PCH_SDVOB); + if (!found) + intel_hdmi_init(dev, HDMIB); + if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) +@@ -4494,12 +4954,11 @@ static void intel_setup_outputs(struct drm_device *dev) + if (SUPPORTS_TV(dev)) + intel_tv_init(dev); + +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct drm_encoder *encoder = &intel_encoder->enc; ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + 
encoder->possible_crtcs = intel_encoder->crtc_mask; +- encoder->possible_clones = intel_connector_clones(dev, ++ encoder->possible_clones = intel_encoder_clones(dev, + intel_encoder->clone_mask); + } + } +@@ -4507,10 +4966,6 @@ static void intel_setup_outputs(struct drm_device *dev) + static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) + { + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); +- struct drm_device *dev = fb->dev; +- +- if (fb->fbdev) +- intelfb_remove(dev, fb); + + drm_framebuffer_cleanup(fb); + drm_gem_object_unreference_unlocked(intel_fb->obj); +@@ -4533,18 +4988,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { + .create_handle = intel_user_framebuffer_create_handle, + }; + +-int intel_framebuffer_create(struct drm_device *dev, +- struct drm_mode_fb_cmd *mode_cmd, +- struct drm_framebuffer **fb, +- struct drm_gem_object *obj) ++int intel_framebuffer_init(struct drm_device *dev, ++ struct intel_framebuffer *intel_fb, ++ struct drm_mode_fb_cmd *mode_cmd, ++ struct drm_gem_object *obj) + { +- struct intel_framebuffer *intel_fb; + int ret; + +- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); +- if (!intel_fb) +- return -ENOMEM; +- + ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); + if (ret) { + DRM_ERROR("framebuffer init failed %d\n", ret); +@@ -4552,40 +5002,41 @@ int intel_framebuffer_create(struct drm_device *dev, + } + + drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); +- + intel_fb->obj = obj; +- +- *fb = &intel_fb->base; +- + return 0; + } + +- + static struct drm_framebuffer * + intel_user_framebuffer_create(struct drm_device *dev, + struct drm_file *filp, + struct drm_mode_fb_cmd *mode_cmd) + { + struct drm_gem_object *obj; +- struct drm_framebuffer *fb; ++ struct intel_framebuffer *intel_fb; + int ret; + + obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); + if (!obj) + return NULL; + +- ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); ++ intel_fb 
= kzalloc(sizeof(*intel_fb), GFP_KERNEL); ++ if (!intel_fb) ++ return NULL; ++ ++ ret = intel_framebuffer_init(dev, intel_fb, ++ mode_cmd, obj); + if (ret) { + drm_gem_object_unreference_unlocked(obj); ++ kfree(intel_fb); + return NULL; + } + +- return fb; ++ return &intel_fb->base; + } + + static const struct drm_mode_config_funcs intel_mode_funcs = { + .fb_create = intel_user_framebuffer_create, +- .fb_changed = intelfb_probe, ++ .output_poll_changed = intel_fb_output_poll_changed, + }; + + static struct drm_gem_object * +@@ -4594,7 +5045,7 @@ intel_alloc_power_context(struct drm_device *dev) + struct drm_gem_object *pwrctx; + int ret; + +- pwrctx = drm_gem_object_alloc(dev, 4096); ++ pwrctx = i915_gem_alloc_object(dev, 4096); + if (!pwrctx) { + DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); + return NULL; +@@ -4624,10 +5075,32 @@ err_unref: + return NULL; + } + ++bool ironlake_set_drps(struct drm_device *dev, u8 val) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u16 rgvswctl; ++ ++ rgvswctl = I915_READ16(MEMSWCTL); ++ if (rgvswctl & MEMCTL_CMD_STS) { ++ DRM_DEBUG("gpu busy, RCS change rejected\n"); ++ return false; /* still busy with another command */ ++ } ++ ++ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | ++ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; ++ I915_WRITE16(MEMSWCTL, rgvswctl); ++ POSTING_READ16(MEMSWCTL); ++ ++ rgvswctl |= MEMCTL_CMD_STS; ++ I915_WRITE16(MEMSWCTL, rgvswctl); ++ ++ return true; ++} ++ + void ironlake_enable_drps(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; ++ u32 rgvmodectl = I915_READ(MEMMODECTL); + u8 fmax, fmin, fstart, vstart; + int i = 0; + +@@ -4646,13 +5119,21 @@ void ironlake_enable_drps(struct drm_device *dev) + fmin = (rgvmodectl & MEMMODE_FMIN_MASK); + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; ++ fstart = fmax; ++ + vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & 
PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + +- dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ ++ dev_priv->fmax = fstart; /* IPS callback will increase this */ ++ dev_priv->fstart = fstart; ++ ++ dev_priv->max_delay = fmax; + dev_priv->min_delay = fmin; + dev_priv->cur_delay = fstart; + ++ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, ++ fstart); ++ + I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + + /* +@@ -4674,20 +5155,19 @@ void ironlake_enable_drps(struct drm_device *dev) + } + msleep(1); + +- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | +- (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; +- I915_WRITE(MEMSWCTL, rgvswctl); +- POSTING_READ(MEMSWCTL); ++ ironlake_set_drps(dev, fstart); + +- rgvswctl |= MEMCTL_CMD_STS; +- I915_WRITE(MEMSWCTL, rgvswctl); ++ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + ++ I915_READ(0x112e0); ++ dev_priv->last_time1 = jiffies_to_msecs(jiffies); ++ dev_priv->last_count2 = I915_READ(0x112f4); ++ getrawmonotonic(&dev_priv->last_time2); + } + + void ironlake_disable_drps(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- u32 rgvswctl; +- u8 fstart; ++ u16 rgvswctl = I915_READ16(MEMSWCTL); + + /* Ack interrupts, disable EFC interrupt */ + I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); +@@ -4697,11 +5177,7 @@ void ironlake_disable_drps(struct drm_device *dev) + I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); + + /* Go back to the starting frequency */ +- fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> +- MEMMODE_FSTART_SHIFT; +- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | +- (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; +- I915_WRITE(MEMSWCTL, rgvswctl); ++ ironlake_set_drps(dev, dev_priv->fstart); + msleep(1); + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE(MEMSWCTL, rgvswctl); +@@ -4709,6 +5185,92 @@ void ironlake_disable_drps(struct drm_device *dev) + + } + ++static unsigned long 
intel_pxfreq(u32 vidfreq) ++{ ++ unsigned long freq; ++ int div = (vidfreq & 0x3f0000) >> 16; ++ int post = (vidfreq & 0x3000) >> 12; ++ int pre = (vidfreq & 0x7); ++ ++ if (!pre) ++ return 0; ++ ++ freq = ((div * 133333) / ((1<<post) * pre)); ++ ++ return freq; ++} ++ ++void intel_init_emon(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 lcfuse; ++ u8 pxw[16]; ++ int i; ++ ++ /* Disable to program */ ++ I915_WRITE(ECR, 0); ++ POSTING_READ(ECR); ++ ++ /* Program energy weights for various events */ ++ I915_WRITE(SDEW, 0x15040d00); ++ I915_WRITE(CSIEW0, 0x007f0000); ++ I915_WRITE(CSIEW1, 0x1e220004); ++ I915_WRITE(CSIEW2, 0x04000004); ++ ++ for (i = 0; i < 5; i++) ++ I915_WRITE(PEW + (i * 4), 0); ++ for (i = 0; i < 3; i++) ++ I915_WRITE(DEW + (i * 4), 0); ++ ++ /* Program P-state weights to account for frequency power adjustment */ ++ for (i = 0; i < 16; i++) { ++ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); ++ unsigned long freq = intel_pxfreq(pxvidfreq); ++ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> ++ PXVFREQ_PX_SHIFT; ++ unsigned long val; ++ ++ val = vid * vid; ++ val *= (freq / 1000); ++ val *= 255; ++ val /= (127*127*900); ++ if (val > 0xff) ++ DRM_ERROR("bad pxval: %ld\n", val); ++ pxw[i] = val; ++ } ++ /* Render standby states get 0 weight */ ++ pxw[14] = 0; ++ pxw[15] = 0; ++ ++ for (i = 0; i < 4; i++) { ++ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | ++ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); ++ I915_WRITE(PXW + (i * 4), val); ++ } ++ ++ /* Adjust magic regs to magic values (more experimental results) */ ++ I915_WRITE(OGW0, 0); ++ I915_WRITE(OGW1, 0); ++ I915_WRITE(EG0, 0x00007f00); ++ I915_WRITE(EG1, 0x0000000e); ++ I915_WRITE(EG2, 0x000e0000); ++ I915_WRITE(EG3, 0x68000300); ++ I915_WRITE(EG4, 0x42000000); ++ I915_WRITE(EG5, 0x00140031); ++ I915_WRITE(EG6, 0); ++ I915_WRITE(EG7, 0); ++ ++ for (i = 0; i < 8; i++) ++ I915_WRITE(PXWL + (i * 4), 0); ++ ++ /* Enable PMON + select events */ ++ I915_WRITE(ECR, 0x80000019); ++ ++ lcfuse = I915_READ(LCFUSE02); ++ ++ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); ++} ++ + void 
intel_init_clock_gating(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +@@ -4732,6 +5294,25 @@ void intel_init_clock_gating(struct drm_device *dev) + } + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); ++ ++ /* ++ * According to the spec the following bits should be set in ++ * order to enable memory self-refresh ++ * The bit 22/21 of 0x42004 ++ * The bit 5 of 0x42020 ++ * The bit 15 of 0x45000 ++ */ ++ if (IS_IRONLAKE(dev)) { ++ I915_WRITE(ILK_DISPLAY_CHICKEN2, ++ (I915_READ(ILK_DISPLAY_CHICKEN2) | ++ ILK_DPARB_GATE | ILK_VSDPFD_FULL)); ++ I915_WRITE(ILK_DSPCLK_GATE, ++ (I915_READ(ILK_DSPCLK_GATE) | ++ ILK_DPARB_CLK_GATE)); ++ I915_WRITE(DISP_ARB_CTL, ++ (I915_READ(DISP_ARB_CTL) | ++ DISP_FBC_WM_DIS)); ++ } + return; + } else if (IS_G4X(dev)) { + uint32_t dspclk_gate; +@@ -4809,8 +5390,7 @@ static void intel_init_display(struct drm_device *dev) + else + dev_priv->display.dpms = i9xx_crtc_dpms; + +- /* Only mobile has FBC, leave pointers NULL for other chips */ +- if (IS_MOBILE(dev)) { ++ if (I915_HAS_FBC(dev)) { + if (IS_GM45(dev)) { + dev_priv->display.fbc_enabled = g4x_fbc_enabled; + dev_priv->display.enable_fbc = g4x_enable_fbc; +@@ -4847,9 +5427,33 @@ static void intel_init_display(struct drm_device *dev) + i830_get_display_clock_speed; + + /* For FIFO watermark updates */ +- if (HAS_PCH_SPLIT(dev)) +- dev_priv->display.update_wm = NULL; +- else if (IS_G4X(dev)) ++ if (HAS_PCH_SPLIT(dev)) { ++ if (IS_IRONLAKE(dev)) { ++ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) ++ dev_priv->display.update_wm = ironlake_update_wm; ++ else { ++ DRM_DEBUG_KMS("Failed to get proper latency. 
" ++ "Disable CxSR\n"); ++ dev_priv->display.update_wm = NULL; ++ } ++ } else ++ dev_priv->display.update_wm = NULL; ++ } else if (IS_PINEVIEW(dev)) { ++ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), ++ dev_priv->is_ddr3, ++ dev_priv->fsb_freq, ++ dev_priv->mem_freq)) { ++ DRM_INFO("failed to find known CxSR latency " ++ "(found ddr%s fsb freq %d, mem freq %d), " ++ "disabling CxSR\n", ++ (dev_priv->is_ddr3 == 1) ? "3": "2", ++ dev_priv->fsb_freq, dev_priv->mem_freq); ++ /* Disable CxSR and never update its watermark again */ ++ pineview_disable_cxsr(dev); ++ dev_priv->display.update_wm = NULL; ++ } else ++ dev_priv->display.update_wm = pineview_update_wm; ++ } else if (IS_G4X(dev)) + dev_priv->display.update_wm = g4x_update_wm; + else if (IS_I965G(dev)) + dev_priv->display.update_wm = i965_update_wm; +@@ -4871,7 +5475,6 @@ static void intel_init_display(struct drm_device *dev) + void intel_modeset_init(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- int num_pipe; + int i; + + drm_mode_config_init(dev); +@@ -4901,13 +5504,13 @@ void intel_modeset_init(struct drm_device *dev) + dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); + + if (IS_MOBILE(dev) || IS_I9XX(dev)) +- num_pipe = 2; ++ dev_priv->num_pipe = 2; + else +- num_pipe = 1; ++ dev_priv->num_pipe = 1; + DRM_DEBUG_KMS("%d display pipe%s available.\n", +- num_pipe, num_pipe > 1 ? "s" : ""); ++ dev_priv->num_pipe, dev_priv->num_pipe > 1 ? 
"s" : ""); + +- for (i = 0; i < num_pipe; i++) { ++ for (i = 0; i < dev_priv->num_pipe; i++) { + intel_crtc_init(dev, i); + } + +@@ -4915,21 +5518,16 @@ void intel_modeset_init(struct drm_device *dev) + + intel_init_clock_gating(dev); + +- if (IS_IRONLAKE_M(dev)) ++ if (IS_IRONLAKE_M(dev)) { + ironlake_enable_drps(dev); ++ intel_init_emon(dev); ++ } + + INIT_WORK(&dev_priv->idle_work, intel_idle_update); + setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, + (unsigned long)dev); + + intel_setup_overlay(dev); +- +- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev), +- dev_priv->fsb_freq, +- dev_priv->mem_freq)) +- DRM_INFO("failed to find known CxSR latency " +- "(found fsb freq %d, mem freq %d), disabling CxSR\n", +- dev_priv->fsb_freq, dev_priv->mem_freq); + } + + void intel_modeset_cleanup(struct drm_device *dev) +@@ -4940,6 +5538,9 @@ void intel_modeset_cleanup(struct drm_device *dev) + + mutex_lock(&dev->struct_mutex); + ++ drm_kms_helper_poll_fini(dev); ++ intel_fbdev_fini(dev); ++ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + /* Skip inactive CRTCs */ + if (!crtc->fb) +@@ -4974,14 +5575,29 @@ void intel_modeset_cleanup(struct drm_device *dev) + } + + +-/* current intel driver doesn't take advantage of encoders +- always give back the encoder for the connector +-*/ +-struct drm_encoder *intel_best_encoder(struct drm_connector *connector) ++/* ++ * Return which encoder is currently attached for connector. 
++ */ ++struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_mode_object *obj; ++ struct drm_encoder *encoder; ++ int i; + +- return &intel_encoder->enc; ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] == 0) ++ break; ++ ++ obj = drm_mode_object_find(connector->dev, ++ connector->encoder_ids[i], ++ DRM_MODE_OBJECT_ENCODER); ++ if (!obj) ++ continue; ++ ++ encoder = obj_to_encoder(obj); ++ return encoder; ++ } ++ return NULL; + } + + /* +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c +index 77e40cf..49b54f0 100644 +--- a/drivers/gpu/drm/i915/intel_dp.c ++++ b/drivers/gpu/drm/i915/intel_dp.c +@@ -48,8 +48,6 @@ struct intel_dp_priv { + uint32_t output_reg; + uint32_t DP; + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; +- uint32_t save_DP; +- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; + bool has_audio; + int dpms_mode; + uint8_t link_bw; +@@ -141,7 +139,8 @@ static int + intel_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); + int max_lanes = intel_dp_max_lane_count(intel_encoder); + +@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, + { + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + uint32_t output_reg = dp_priv->output_reg; +- struct drm_device *dev = intel_encoder->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ch_ctl = output_reg + 0x10; + uint32_t ch_data = ch_ctl + 4; +@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct 
intel_encoder *intel_encoder, + uint32_t ctl; + uint32_t status; + uint32_t aux_clock_divider; +- int try; ++ int try, precharge; + + /* The clock divider is based off the hrawclk, + * and would like to run at 2MHz. So, take the + * hrawclk value and divide by 2 and use that + */ +- if (IS_eDP(intel_encoder)) +- aux_clock_divider = 225; /* eDP input clock at 450Mhz */ +- else if (HAS_PCH_SPLIT(dev)) ++ if (IS_eDP(intel_encoder)) { ++ if (IS_GEN6(dev)) ++ aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ ++ else ++ aux_clock_divider = 225; /* eDP input clock at 450Mhz */ ++ } else if (HAS_PCH_SPLIT(dev)) + aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ + else + aux_clock_divider = intel_hrawclk(dev) / 2; + ++ if (IS_GEN6(dev)) ++ precharge = 3; ++ else ++ precharge = 5; ++ + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ +@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, + ctl = (DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | +- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | ++ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | +@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + } + + static int +-intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) ++intel_dp_i2c_init(struct intel_encoder *intel_encoder, ++ struct intel_connector *intel_connector, const char *name) + { + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + +@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) + strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); + dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; + dp_priv->adapter.algo_data = 
&dp_priv->algo; +- dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; ++ dp_priv->adapter.dev.parent = &intel_connector->base.kdev; + + return i2c_dp_aux_add_bus(&dp_priv->adapter); + } +@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + { + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int lane_count = 4; +@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + /* + * Find the lane count in the intel_encoder private + */ +- list_for_each_entry(connector, &mode_config->connector_list, head) { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; ++ list_for_each_entry(encoder, &mode_config->encoder_list, head) { ++ struct intel_encoder *intel_encoder; ++ struct intel_dp_priv *dp_priv; + +- if (!connector->encoder || connector->encoder->crtc != crtc) ++ if (encoder->crtc != crtc) + continue; + ++ intel_encoder = enc_to_intel_encoder(encoder); ++ dp_priv = intel_encoder->dev_priv; ++ + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { + lane_count = dp_priv->lane_count; + break; +@@ -626,16 +637,24 @@ static void + intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) + { ++ struct drm_device *dev = encoder->dev; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct drm_crtc *crtc = intel_encoder->enc.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + +- dp_priv->DP = (DP_LINK_TRAIN_OFF | +- DP_VOLTAGE_0_4 | +- DP_PRE_EMPHASIS_0 | +- DP_SYNC_VS_HIGH | +- DP_SYNC_HS_HIGH); ++ dp_priv->DP = (DP_VOLTAGE_0_4 | ++ 
DP_PRE_EMPHASIS_0); ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ++ dp_priv->DP |= DP_SYNC_HS_HIGH; ++ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ++ dp_priv->DP |= DP_SYNC_VS_HIGH; ++ ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT; ++ else ++ dp_priv->DP |= DP_LINK_TRAIN_OFF; + + switch (dp_priv->lane_count) { + case 1: +@@ -656,15 +675,15 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + dp_priv->link_configuration[1] = dp_priv->lane_count; + + /* +- * Check for DPCD version > 1.1, +- * enable enahanced frame stuff in that case ++ * Check for DPCD version > 1.1 and enhanced framing support + */ +- if (dp_priv->dpcd[0] >= 0x11) { ++ if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { + dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + dp_priv->DP |= DP_ENHANCED_FRAMING; + } + +- if (intel_crtc->pipe == 1) ++ /* CPT DP's pipe select is decided in TRANS_DP_CTL */ ++ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) + dp_priv->DP |= DP_PIPEB_SELECT; + + if (IS_eDP(intel_encoder)) { +@@ -704,7 +723,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) + { + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; +- struct drm_device *dev = intel_encoder->base.dev; ++ struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(dp_priv->output_reg); + +@@ -749,20 +768,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + return link_status[r - DP_LANE0_1_STATUS]; + } + +-static void +-intel_dp_save(struct drm_connector *connector) +-{ +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct drm_device *dev = intel_encoder->base.dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; +- +- 
dp_priv->save_DP = I915_READ(dp_priv->output_reg); +- intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET, +- dp_priv->save_link_configuration, +- sizeof (dp_priv->save_link_configuration)); +-} +- + static uint8_t + intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +@@ -892,6 +897,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) + return signal_levels; + } + ++/* Gen6's DP voltage swing and pre-emphasis control */ ++static uint32_t ++intel_gen6_edp_signal_levels(uint8_t train_set) ++{ ++ switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { ++ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: ++ return EDP_LINK_TRAIN_400MV_0DB_SNB_B; ++ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: ++ return EDP_LINK_TRAIN_400MV_6DB_SNB_B; ++ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: ++ return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; ++ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: ++ return EDP_LINK_TRAIN_800MV_0DB_SNB_B; ++ default: ++ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); ++ return EDP_LINK_TRAIN_400MV_0DB_SNB_B; ++ } ++} ++ + static uint8_t + intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +@@ -948,7 +972,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder, + uint8_t train_set[4], + bool first) + { +- struct drm_device *dev = intel_encoder->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + int ret; +@@ -974,7 +998,7 @@ static void + intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) + { +- struct drm_device *dev = intel_encoder->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = 
intel_encoder->dev_priv; + uint8_t train_set[4]; +@@ -985,23 +1009,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, + bool channel_eq = false; + bool first = true; + int tries; ++ u32 reg; + + /* Write the link configuration data */ +- intel_dp_aux_native_write(intel_encoder, 0x100, ++ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET, + link_configuration, DP_LINK_CONFIGURATION_SIZE); + + DP |= DP_PORT_EN; +- DP &= ~DP_LINK_TRAIN_MASK; ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ DP &= ~DP_LINK_TRAIN_MASK_CPT; ++ else ++ DP &= ~DP_LINK_TRAIN_MASK; + memset(train_set, 0, 4); + voltage = 0xff; + tries = 0; + clock_recovery = false; + for (;;) { + /* Use train_set[0] to set the voltage and pre emphasis values */ +- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); +- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; ++ uint32_t signal_levels; ++ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { ++ signal_levels = intel_gen6_edp_signal_levels(train_set[0]); ++ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; ++ } else { ++ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); ++ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; ++ } ++ ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ reg = DP | DP_LINK_TRAIN_PAT_1_CPT; ++ else ++ reg = DP | DP_LINK_TRAIN_PAT_1; + +- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1, ++ if (!intel_dp_set_link_train(intel_encoder, reg, + DP_TRAINING_PATTERN_1, train_set, first)) + break; + first = false; +@@ -1041,11 +1080,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, + channel_eq = false; + for (;;) { + /* Use train_set[0] to set the voltage and pre emphasis values */ +- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); +- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; ++ uint32_t 
signal_levels; ++ ++ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { ++ signal_levels = intel_gen6_edp_signal_levels(train_set[0]); ++ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; ++ } else { ++ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); ++ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; ++ } ++ ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ reg = DP | DP_LINK_TRAIN_PAT_2_CPT; ++ else ++ reg = DP | DP_LINK_TRAIN_PAT_2; + + /* channel eq pattern */ +- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2, ++ if (!intel_dp_set_link_train(intel_encoder, reg, + DP_TRAINING_PATTERN_2, train_set, + false)) + break; +@@ -1068,7 +1119,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, + ++tries; + } + +- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ reg = DP | DP_LINK_TRAIN_OFF_CPT; ++ else ++ reg = DP | DP_LINK_TRAIN_OFF; ++ ++ I915_WRITE(dp_priv->output_reg, reg); + POSTING_READ(dp_priv->output_reg); + intel_dp_aux_native_write_1(intel_encoder, + DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); +@@ -1077,7 +1133,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, + static void + intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) + { +- struct drm_device *dev = intel_encoder->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + +@@ -1090,9 +1146,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) + udelay(100); + } + +- DP &= ~DP_LINK_TRAIN_MASK; +- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); +- POSTING_READ(dp_priv->output_reg); ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) { ++ DP &= ~DP_LINK_TRAIN_MASK_CPT; ++ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); ++ 
POSTING_READ(dp_priv->output_reg); ++ } else { ++ DP &= ~DP_LINK_TRAIN_MASK; ++ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); ++ POSTING_READ(dp_priv->output_reg); ++ } + + udelay(17000); + +@@ -1102,18 +1164,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) + POSTING_READ(dp_priv->output_reg); + } + +-static void +-intel_dp_restore(struct drm_connector *connector) +-{ +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; +- +- if (dp_priv->save_DP & DP_PORT_EN) +- intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration); +- else +- intel_dp_link_down(intel_encoder, dp_priv->save_DP); +-} +- + /* + * According to DP spec + * 5.1.2: +@@ -1144,7 +1194,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder) + static enum drm_connector_status + ironlake_dp_detect(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + enum drm_connector_status status; + +@@ -1156,6 +1207,8 @@ ironlake_dp_detect(struct drm_connector *connector) + if (dp_priv->dpcd[0] != 0) + status = connector_status_connected; + } ++ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0], ++ dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]); + return status; + } + +@@ -1168,8 +1221,9 @@ ironlake_dp_detect(struct drm_connector *connector) + static enum drm_connector_status + intel_dp_detect(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct drm_device *dev = intel_encoder->base.dev; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct 
drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + uint32_t temp, bit; +@@ -1180,16 +1234,6 @@ intel_dp_detect(struct drm_connector *connector) + if (HAS_PCH_SPLIT(dev)) + return ironlake_dp_detect(connector); + +- temp = I915_READ(PORT_HOTPLUG_EN); +- +- I915_WRITE(PORT_HOTPLUG_EN, +- temp | +- DPB_HOTPLUG_INT_EN | +- DPC_HOTPLUG_INT_EN | +- DPD_HOTPLUG_INT_EN); +- +- POSTING_READ(PORT_HOTPLUG_EN); +- + switch (dp_priv->output_reg) { + case DP_B: + bit = DPB_HOTPLUG_INT_STATUS; +@@ -1222,15 +1266,16 @@ intel_dp_detect(struct drm_connector *connector) + + static int intel_dp_get_modes(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct drm_device *dev = intel_encoder->base.dev; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + /* We should parse the EDID data and find out if it has an audio sink + */ + +- ret = intel_ddc_get_modes(intel_encoder); ++ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + if (ret) + return ret; + +@@ -1249,13 +1294,9 @@ static int intel_dp_get_modes(struct drm_connector *connector) + static void + intel_dp_destroy (struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- +- if (intel_encoder->i2c_bus) +- intel_i2c_destroy(intel_encoder->i2c_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- kfree(intel_encoder); ++ kfree(connector); + } + + static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { +@@ -1268,8 +1309,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { + + static const struct drm_connector_funcs 
intel_dp_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_dp_save, +- .restore = intel_dp_restore, + .detect = intel_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = intel_dp_destroy, +@@ -1278,12 +1317,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { + static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { + .get_modes = intel_dp_get_modes, + .mode_valid = intel_dp_mode_valid, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_dp_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ ++ if (intel_encoder->i2c_bus) ++ intel_i2c_destroy(intel_encoder->i2c_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_dp_enc_funcs = { +@@ -1299,12 +1343,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder) + intel_dp_check_link_status(intel_encoder); + } + ++/* Return which DP Port should be selected for Transcoder DP control */ ++int ++intel_trans_dp_port_sel (struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_mode_config *mode_config = &dev->mode_config; ++ struct drm_encoder *encoder; ++ struct intel_encoder *intel_encoder = NULL; ++ ++ list_for_each_entry(encoder, &mode_config->encoder_list, head) { ++ if (encoder->crtc != crtc) ++ continue; ++ ++ intel_encoder = enc_to_intel_encoder(encoder); ++ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; ++ return dp_priv->output_reg; ++ } ++ } ++ return -1; ++} ++ + void + intel_dp_init(struct drm_device *dev, int output_reg) + { + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_connector *connector; + struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct intel_dp_priv *dp_priv; + const 
char *name = NULL; + +@@ -1313,13 +1380,21 @@ intel_dp_init(struct drm_device *dev, int output_reg) + if (!intel_encoder) + return; + ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ + dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); + +- connector = &intel_encoder->base; ++ connector = &intel_connector->base; + drm_connector_init(dev, connector, &intel_dp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); + ++ connector->polled = DRM_CONNECTOR_POLL_HPD; ++ + if (output_reg == DP_A) + intel_encoder->type = INTEL_OUTPUT_EDP; + else +@@ -1349,7 +1424,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); + +- drm_mode_connector_attach_encoder(&intel_encoder->base, ++ drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->enc); + drm_sysfs_connector_add(connector); + +@@ -1378,7 +1453,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) + break; + } + +- intel_dp_i2c_init(intel_encoder, name); ++ intel_dp_i2c_init(intel_encoder, intel_connector, name); + + intel_encoder->ddc_bus = &dp_priv->adapter; + intel_encoder->hot_plug = intel_dp_hot_plug; +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h +index e302537..df931f7 100644 +--- a/drivers/gpu/drm/i915/intel_drv.h ++++ b/drivers/gpu/drm/i915/intel_drv.h +@@ -96,8 +96,6 @@ struct intel_framebuffer { + + + struct intel_encoder { +- struct drm_connector base; +- + struct drm_encoder enc; + int type; + struct i2c_adapter *i2c_bus; +@@ -110,6 +108,11 @@ struct intel_encoder { + int clone_mask; + }; + ++struct intel_connector { ++ struct drm_connector base; ++ void *dev_priv; ++}; ++ + struct intel_crtc; + struct intel_overlay { + struct drm_device *dev; +@@ -149,17 +152,18 @@ struct intel_crtc 
{ + bool lowfreq_avail; + struct intel_overlay *overlay; + struct intel_unpin_work *unpin_work; ++ int fdi_lanes; + }; + + #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) +-#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) ++#define to_intel_connector(x) container_of(x, struct intel_connector, base) + #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) + #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) + + struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, + const char *name); + void intel_i2c_destroy(struct i2c_adapter *adapter); +-int intel_ddc_get_modes(struct intel_encoder *intel_encoder); ++int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); + extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); + void intel_i2c_quirk_set(struct drm_device *dev, bool enable); + void intel_i2c_reset_gmbus(struct drm_device *dev); +@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc); + extern void intel_encoder_prepare (struct drm_encoder *encoder); + extern void intel_encoder_commit (struct drm_encoder *encoder); + +-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); ++extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); + + extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, + struct drm_crtc *crtc); +@@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + extern void intel_wait_for_vblank(struct drm_device *dev); + extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); + extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, ++ struct drm_connector *connector, + struct drm_display_mode *mode, + int *dpms_mode); + extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, ++ struct drm_connector 
*connector, + int dpms_mode); + + extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); + extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); + extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); +-extern int intelfb_probe(struct drm_device *dev); +-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); +-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); + extern void intelfb_restore(void); + extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno); +@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev); + extern void ironlake_enable_drps(struct drm_device *dev); + extern void ironlake_disable_drps(struct drm_device *dev); + +-extern int intel_framebuffer_create(struct drm_device *dev, +- struct drm_mode_fb_cmd *mode_cmd, +- struct drm_framebuffer **fb, +- struct drm_gem_object *obj); ++extern int intel_framebuffer_init(struct drm_device *dev, ++ struct intel_framebuffer *ifb, ++ struct drm_mode_fb_cmd *mode_cmd, ++ struct drm_gem_object *obj); ++extern int intel_fbdev_init(struct drm_device *dev); ++extern void intel_fbdev_fini(struct drm_device *dev); + + extern void intel_prepare_page_flip(struct drm_device *dev, int plane); + extern void intel_finish_page_flip(struct drm_device *dev, int pipe); +@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data, + struct drm_file *file_priv); + extern int intel_overlay_attrs(struct drm_device *dev, void *data, + struct drm_file *file_priv); ++ ++extern void intel_fb_output_poll_changed(struct drm_device *dev); + #endif /* __INTEL_DRV_H__ */ +diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c +index ebf213c..227feca 100644 +--- a/drivers/gpu/drm/i915/intel_dvo.c ++++ b/drivers/gpu/drm/i915/intel_dvo.c +@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct 
drm_encoder *encoder, int mode) + } + } + +-static void intel_dvo_save(struct drm_connector *connector) +-{ +- struct drm_i915_private *dev_priv = connector->dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_dvo_device *dvo = intel_encoder->dev_priv; +- +- /* Each output should probably just save the registers it touches, +- * but for now, use more overkill. +- */ +- dev_priv->saveDVOA = I915_READ(DVOA); +- dev_priv->saveDVOB = I915_READ(DVOB); +- dev_priv->saveDVOC = I915_READ(DVOC); +- +- dvo->dev_ops->save(dvo); +-} +- +-static void intel_dvo_restore(struct drm_connector *connector) +-{ +- struct drm_i915_private *dev_priv = connector->dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_dvo_device *dvo = intel_encoder->dev_priv; +- +- dvo->dev_ops->restore(dvo); +- +- I915_WRITE(DVOA, dev_priv->saveDVOA); +- I915_WRITE(DVOB, dev_priv->saveDVOB); +- I915_WRITE(DVOC, dev_priv->saveDVOC); +-} +- + static int intel_dvo_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) +@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, + */ + static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; + + return dvo->dev_ops->detect(dvo); +@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct 
drm_connector *connecto + + static int intel_dvo_get_modes(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; + + /* We should probably have an i2c driver get_modes function for those +@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector) + * (TV-out, for example), but for now with just TMDS and LVDS, + * that's not the case. + */ +- intel_ddc_get_modes(intel_encoder); ++ intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + if (!list_empty(&connector->probed_modes)) + return 1; + +@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector) + + static void intel_dvo_destroy (struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_dvo_device *dvo = intel_encoder->dev_priv; +- +- if (dvo) { +- if (dvo->dev_ops->destroy) +- dvo->dev_ops->destroy(dvo); +- if (dvo->panel_fixed_mode) +- kfree(dvo->panel_fixed_mode); +- /* no need, in i830_dvoices[] now */ +- //kfree(dvo); +- } +- if (intel_encoder->i2c_bus) +- intel_i2c_destroy(intel_encoder->i2c_bus); +- if (intel_encoder->ddc_bus) +- intel_i2c_destroy(intel_encoder->ddc_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- kfree(intel_encoder); +-} +- +-#ifdef RANDR_GET_CRTC_INTERFACE +-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_dvo_device *dvo = intel_encoder->dev_priv; +- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); +- +- return intel_pipe_to_crtc(pScrn, pipe); ++ 
kfree(connector); + } +-#endif + + static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { + .dpms = intel_dvo_dpms, +@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { + + static const struct drm_connector_funcs intel_dvo_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_dvo_save, +- .restore = intel_dvo_restore, + .detect = intel_dvo_detect, + .destroy = intel_dvo_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, +@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { + static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { + .mode_valid = intel_dvo_mode_valid, + .get_modes = intel_dvo_get_modes, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_dvo_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; ++ ++ if (dvo) { ++ if (dvo->dev_ops->destroy) ++ dvo->dev_ops->destroy(dvo); ++ if (dvo->panel_fixed_mode) ++ kfree(dvo->panel_fixed_mode); ++ } ++ if (intel_encoder->i2c_bus) ++ intel_i2c_destroy(intel_encoder->i2c_bus); ++ if (intel_encoder->ddc_bus) ++ intel_i2c_destroy(intel_encoder->ddc_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_dvo_enc_funcs = { +@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; + uint32_t dvo_reg = dvo->dvo_reg; + uint32_t dvo_val = 
I915_READ(dvo_reg); +@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector) + void intel_dvo_init(struct drm_device *dev) + { + struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct intel_dvo_device *dvo; + struct i2c_adapter *i2cbus = NULL; + int ret = 0; +@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev) + if (!intel_encoder) + return; + ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ + /* Set up the DDC bus */ + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); + if (!intel_encoder->ddc_bus) +@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev) + + /* Now, try to find a controller */ + for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { +- struct drm_connector *connector = &intel_encoder->base; ++ struct drm_connector *connector = &intel_connector->base; + int gpio; + + dvo = &intel_dvo_devices[i]; +@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev) + drm_encoder_helper_add(&intel_encoder->enc, + &intel_dvo_helper_funcs); + +- drm_mode_connector_attach_encoder(&intel_encoder->base, ++ drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->enc); + if (dvo->type == INTEL_DVO_CHIP_LVDS) { + /* For our LVDS chipsets, we should hopefully be able +@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev) + intel_i2c_destroy(i2cbus); + free_intel: + kfree(intel_encoder); ++ kfree(intel_connector); + } +diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c +index 8a0b3bc..c3c5052 100644 +--- a/drivers/gpu/drm/i915/intel_fb.c ++++ b/drivers/gpu/drm/i915/intel_fb.c +@@ -44,9 +44,10 @@ + #include "i915_drm.h" + #include "i915_drv.h" + +-struct intelfb_par { ++struct intel_fbdev { + struct drm_fb_helper helper; +- struct intel_framebuffer *intel_fb; ++ struct intel_framebuffer ifb; ++ struct list_head 
fbdev_list; + struct drm_display_mode *our_mode; + }; + +@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, +- .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, +@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = { + .fb_setcmap = drm_fb_helper_setcmap, + }; + +-static struct drm_fb_helper_funcs intel_fb_helper_funcs = { +- .gamma_set = intel_crtc_fb_gamma_set, +- .gamma_get = intel_crtc_fb_gamma_get, +-}; +- +- +-/** +- * Currently it is assumed that the old framebuffer is reused. +- * +- * LOCKING +- * caller should hold the mode config lock. +- * +- */ +-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc) +-{ +- struct fb_info *info; +- struct drm_framebuffer *fb; +- struct drm_display_mode *mode = crtc->desired_mode; +- +- fb = crtc->fb; +- if (!fb) +- return 1; +- +- info = fb->fbdev; +- if (!info) +- return 1; +- +- if (!mode) +- return 1; +- +- info->var.xres = mode->hdisplay; +- info->var.right_margin = mode->hsync_start - mode->hdisplay; +- info->var.hsync_len = mode->hsync_end - mode->hsync_start; +- info->var.left_margin = mode->htotal - mode->hsync_end; +- info->var.yres = mode->vdisplay; +- info->var.lower_margin = mode->vsync_start - mode->vdisplay; +- info->var.vsync_len = mode->vsync_end - mode->vsync_start; +- info->var.upper_margin = mode->vtotal - mode->vsync_end; +- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; +- /* avoid overflow */ +- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; +- +- return 0; +-} +-EXPORT_SYMBOL(intelfb_resize); +- +-static int intelfb_create(struct drm_device *dev, uint32_t fb_width, +- uint32_t fb_height, uint32_t surface_width, +- uint32_t surface_height, +- uint32_t surface_depth, uint32_t surface_bpp, +- struct drm_framebuffer **fb_p) ++static int 
intelfb_create(struct intel_fbdev *ifbdev, ++ struct drm_fb_helper_surface_size *sizes) + { ++ struct drm_device *dev = ifbdev->helper.dev; + struct fb_info *info; +- struct intelfb_par *par; + struct drm_framebuffer *fb; +- struct intel_framebuffer *intel_fb; + struct drm_mode_fb_cmd mode_cmd; + struct drm_gem_object *fbo = NULL; + struct drm_i915_gem_object *obj_priv; +@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, + int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; + + /* we don't do packed 24bpp */ +- if (surface_bpp == 24) +- surface_bpp = 32; ++ if (sizes->surface_bpp == 24) ++ sizes->surface_bpp = 32; + +- mode_cmd.width = surface_width; +- mode_cmd.height = surface_height; ++ mode_cmd.width = sizes->surface_width; ++ mode_cmd.height = sizes->surface_height; + +- mode_cmd.bpp = surface_bpp; ++ mode_cmd.bpp = sizes->surface_bpp; + mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); +- mode_cmd.depth = surface_depth; ++ mode_cmd.depth = sizes->surface_depth; + + size = mode_cmd.pitch * mode_cmd.height; + size = ALIGN(size, PAGE_SIZE); +- fbo = drm_gem_object_alloc(dev, size); ++ fbo = i915_gem_alloc_object(dev, size); + if (!fbo) { + DRM_ERROR("failed to allocate framebuffer\n"); + ret = -ENOMEM; +@@ -155,47 +105,43 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, + } + + /* Flush everything out, we'll be doing GTT only from now on */ +- i915_gem_object_set_to_gtt_domain(fbo, 1); +- +- ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); ++ ret = i915_gem_object_set_to_gtt_domain(fbo, 1); + if (ret) { +- DRM_ERROR("failed to allocate fb.\n"); ++ DRM_ERROR("failed to bind fb: %d.\n", ret); + goto out_unpin; + } + +- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); +- +- intel_fb = to_intel_framebuffer(fb); +- *fb_p = fb; +- +- info = framebuffer_alloc(sizeof(struct intelfb_par), device); ++ info = framebuffer_alloc(0, device); + if (!info) { + ret = -ENOMEM; + 
goto out_unpin; + } + +- par = info->par; ++ info->par = ifbdev; + +- par->helper.funcs = &intel_fb_helper_funcs; +- par->helper.dev = dev; +- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, +- INTELFB_CONN_LIMIT); +- if (ret) +- goto out_unref; ++ intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); ++ ++ fb = &ifbdev->ifb.base; ++ ++ ifbdev->helper.fb = fb; ++ ifbdev->helper.fbdev = info; + + strcpy(info->fix.id, "inteldrmfb"); + + info->flags = FBINFO_DEFAULT; +- + info->fbops = &intelfb_ops; + +- + /* setup aperture base/size for vesafb takeover */ +- info->aperture_base = dev->mode_config.fb_base; ++ info->apertures = alloc_apertures(1); ++ if (!info->apertures) { ++ ret = -ENOMEM; ++ goto out_unpin; ++ } ++ info->apertures->ranges[0].base = dev->mode_config.fb_base; + if (IS_I9XX(dev)) +- info->aperture_size = pci_resource_len(dev->pdev, 2); ++ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); + else +- info->aperture_size = pci_resource_len(dev->pdev, 0); ++ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); + + info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; + info->fix.smem_len = size; +@@ -208,12 +154,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, + ret = -ENOSPC; + goto out_unpin; + } ++ ++ ret = fb_alloc_cmap(&info->cmap, 256, 0); ++ if (ret) { ++ ret = -ENOMEM; ++ goto out_unpin; ++ } + info->screen_size = size; + + // memset(info->screen_base, 0, size); + + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); +- drm_fb_helper_fill_var(info, fb, fb_width, fb_height); ++ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); + + /* FIXME: we really shouldn't expose mmio space at all */ + info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); +@@ -225,14 +177,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; + +- fb->fbdev = info; 
+- +- par->intel_fb = intel_fb; +- +- /* To allow resizeing without swapping buffers */ + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", +- intel_fb->base.width, intel_fb->base.height, +- obj_priv->gtt_offset, fbo); ++ fb->width, fb->height, ++ obj_priv->gtt_offset, fbo); ++ + + mutex_unlock(&dev->struct_mutex); + vga_switcheroo_client_fb_set(dev->pdev, info); +@@ -247,35 +195,92 @@ out: + return ret; + } + +-int intelfb_probe(struct drm_device *dev) ++static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, ++ struct drm_fb_helper_surface_size *sizes) + { ++ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; ++ int new_fb = 0; + int ret; + +- DRM_DEBUG_KMS("\n"); +- ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); +- return ret; ++ if (!helper->fb) { ++ ret = intelfb_create(ifbdev, sizes); ++ if (ret) ++ return ret; ++ new_fb = 1; ++ } ++ return new_fb; + } +-EXPORT_SYMBOL(intelfb_probe); + +-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) ++static struct drm_fb_helper_funcs intel_fb_helper_funcs = { ++ .gamma_set = intel_crtc_fb_gamma_set, ++ .gamma_get = intel_crtc_fb_gamma_get, ++ .fb_probe = intel_fb_find_or_create_single, ++}; ++ ++int intel_fbdev_destroy(struct drm_device *dev, ++ struct intel_fbdev *ifbdev) + { + struct fb_info *info; ++ struct intel_framebuffer *ifb = &ifbdev->ifb; + +- if (!fb) +- return -EINVAL; +- +- info = fb->fbdev; +- +- if (info) { +- struct intelfb_par *par = info->par; ++ if (ifbdev->helper.fbdev) { ++ info = ifbdev->helper.fbdev; + unregister_framebuffer(info); + iounmap(info->screen_base); +- if (info->par) +- drm_fb_helper_free(&par->helper); ++ if (info->cmap.len) ++ fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + } + ++ drm_fb_helper_fini(&ifbdev->helper); ++ ++ drm_framebuffer_cleanup(&ifb->base); ++ if (ifb->obj) ++ drm_gem_object_unreference_unlocked(ifb->obj); ++ ++ return 0; ++} ++ ++int intel_fbdev_init(struct drm_device *dev) ++{ ++ struct 
intel_fbdev *ifbdev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret; ++ ++ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); ++ if (!ifbdev) ++ return -ENOMEM; ++ ++ dev_priv->fbdev = ifbdev; ++ ifbdev->helper.funcs = &intel_fb_helper_funcs; ++ ++ ret = drm_fb_helper_init(dev, &ifbdev->helper, ++ dev_priv->num_pipe, ++ INTELFB_CONN_LIMIT); ++ if (ret) { ++ kfree(ifbdev); ++ return ret; ++ } ++ ++ drm_fb_helper_single_add_all_connectors(&ifbdev->helper); ++ drm_fb_helper_initial_config(&ifbdev->helper, 32); + return 0; + } +-EXPORT_SYMBOL(intelfb_remove); ++ ++void intel_fbdev_fini(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ if (!dev_priv->fbdev) ++ return; ++ ++ intel_fbdev_destroy(dev, dev_priv->fbdev); ++ kfree(dev_priv->fbdev); ++ dev_priv->fbdev = NULL; ++} + MODULE_LICENSE("GPL and additional rights"); ++ ++void intel_fb_output_poll_changed(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); ++} +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c +index 48cade0..83bd764 100644 +--- a/drivers/gpu/drm/i915/intel_hdmi.c ++++ b/drivers/gpu/drm/i915/intel_hdmi.c +@@ -39,7 +39,6 @@ + + struct intel_hdmi_priv { + u32 sdvox_reg; +- u32 save_SDVOX; + bool has_hdmi_sink; + }; + +@@ -60,11 +59,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, + SDVO_VSYNC_ACTIVE_HIGH | + SDVO_HSYNC_ACTIVE_HIGH; + +- if (hdmi_priv->has_hdmi_sink) ++ if (hdmi_priv->has_hdmi_sink) { + sdvox |= SDVO_AUDIO_ENABLE; ++ if (HAS_PCH_CPT(dev)) ++ sdvox |= HDMI_MODE_SELECT; ++ } + +- if (intel_crtc->pipe == 1) +- sdvox |= SDVO_PIPE_B_SELECT; ++ if (intel_crtc->pipe == 1) { ++ if (HAS_PCH_CPT(dev)) ++ sdvox |= PORT_TRANS_B_SEL_CPT; ++ else ++ sdvox |= SDVO_PIPE_B_SELECT; ++ } + + I915_WRITE(hdmi_priv->sdvox_reg, sdvox); + POSTING_READ(hdmi_priv->sdvox_reg); +@@ -106,27 +112,6 @@ static void 
intel_hdmi_dpms(struct drm_encoder *encoder, int mode) + } + } + +-static void intel_hdmi_save(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; +- +- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); +-} +- +-static void intel_hdmi_restore(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; +- +- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); +- POSTING_READ(hdmi_priv->sdvox_reg); +-} +- + static int intel_hdmi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +@@ -151,13 +136,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, + static enum drm_connector_status + intel_hdmi_detect(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; + struct edid *edid = NULL; + enum drm_connector_status status = connector_status_disconnected; + + hdmi_priv->has_hdmi_sink = false; +- edid = drm_get_edid(&intel_encoder->base, ++ edid = drm_get_edid(connector, + intel_encoder->ddc_bus); + + if (edid) { +@@ -165,7 +151,7 @@ intel_hdmi_detect(struct drm_connector *connector) + status = connector_status_connected; + hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + } +- intel_encoder->base.display_info.raw_edid = NULL; ++ connector->display_info.raw_edid = NULL; + kfree(edid); + } + +@@ -174,24 +160,21 @@ 
intel_hdmi_detect(struct drm_connector *connector) + + static int intel_hdmi_get_modes(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + /* We should parse the EDID data and find out if it's an HDMI sink so + * we can send audio to it. + */ + +- return intel_ddc_get_modes(intel_encoder); ++ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + } + + static void intel_hdmi_destroy(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- +- if (intel_encoder->i2c_bus) +- intel_i2c_destroy(intel_encoder->i2c_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- kfree(intel_encoder); ++ kfree(connector); + } + + static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { +@@ -204,8 +187,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { + + static const struct drm_connector_funcs intel_hdmi_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_hdmi_save, +- .restore = intel_hdmi_restore, + .detect = intel_hdmi_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = intel_hdmi_destroy, +@@ -214,12 +195,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { + static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { + .get_modes = intel_hdmi_get_modes, + .mode_valid = intel_hdmi_mode_valid, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ ++ if (intel_encoder->i2c_bus) ++ intel_i2c_destroy(intel_encoder->i2c_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); 
+ } + + static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { +@@ -231,21 +217,30 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_connector *connector; + struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct intel_hdmi_priv *hdmi_priv; + + intel_encoder = kcalloc(sizeof(struct intel_encoder) + + sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); + if (!intel_encoder) + return; ++ ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ + hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); + +- connector = &intel_encoder->base; ++ connector = &intel_connector->base; + drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); + + intel_encoder->type = INTEL_OUTPUT_HDMI; + ++ connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); +@@ -285,7 +280,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); + +- drm_mode_connector_attach_encoder(&intel_encoder->base, ++ drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_encoder->enc); + drm_sysfs_connector_add(connector); + +@@ -303,6 +298,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) + err_connector: + drm_connector_cleanup(connector); + kfree(intel_encoder); ++ kfree(intel_connector); + + return; + } +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c +index b66806a..6a1accd 100644 +--- a/drivers/gpu/drm/i915/intel_lvds.c ++++ b/drivers/gpu/drm/i915/intel_lvds.c +@@ -139,75 +139,6 @@ static void 
intel_lvds_dpms(struct drm_encoder *encoder, int mode) + /* XXX: We never power down the LVDS pairs. */ + } + +-static void intel_lvds_save(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; +- u32 pwm_ctl_reg; +- +- if (HAS_PCH_SPLIT(dev)) { +- pp_on_reg = PCH_PP_ON_DELAYS; +- pp_off_reg = PCH_PP_OFF_DELAYS; +- pp_ctl_reg = PCH_PP_CONTROL; +- pp_div_reg = PCH_PP_DIVISOR; +- pwm_ctl_reg = BLC_PWM_CPU_CTL; +- } else { +- pp_on_reg = PP_ON_DELAYS; +- pp_off_reg = PP_OFF_DELAYS; +- pp_ctl_reg = PP_CONTROL; +- pp_div_reg = PP_DIVISOR; +- pwm_ctl_reg = BLC_PWM_CTL; +- } +- +- dev_priv->savePP_ON = I915_READ(pp_on_reg); +- dev_priv->savePP_OFF = I915_READ(pp_off_reg); +- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg); +- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg); +- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg); +- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & +- BACKLIGHT_DUTY_CYCLE_MASK); +- +- /* +- * If the light is off at server startup, just make it full brightness +- */ +- if (dev_priv->backlight_duty_cycle == 0) +- dev_priv->backlight_duty_cycle = +- intel_lvds_get_max_backlight(dev); +-} +- +-static void intel_lvds_restore(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; +- u32 pwm_ctl_reg; +- +- if (HAS_PCH_SPLIT(dev)) { +- pp_on_reg = PCH_PP_ON_DELAYS; +- pp_off_reg = PCH_PP_OFF_DELAYS; +- pp_ctl_reg = PCH_PP_CONTROL; +- pp_div_reg = PCH_PP_DIVISOR; +- pwm_ctl_reg = BLC_PWM_CPU_CTL; +- } else { +- pp_on_reg = PP_ON_DELAYS; +- pp_off_reg = PP_OFF_DELAYS; +- pp_ctl_reg = PP_CONTROL; +- pp_div_reg = PP_DIVISOR; +- pwm_ctl_reg = BLC_PWM_CTL; +- } +- +- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL); +- I915_WRITE(pp_on_reg, dev_priv->savePP_ON); +- 
I915_WRITE(pp_off_reg, dev_priv->savePP_OFF); +- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR); +- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL); +- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) +- intel_lvds_set_power(dev, true); +- else +- intel_lvds_set_power(dev, false); +-} +- + static int intel_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect + static int intel_lvds_get_modes(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = 0; + + if (dev_priv->lvds_edid_good) { +- ret = intel_ddc_get_modes(intel_encoder); ++ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + + if (ret) + return ret; +@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, + static void intel_lvds_destroy(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct drm_i915_private *dev_priv = dev->dev_private; + +- if (intel_encoder->ddc_bus) +- intel_i2c_destroy(intel_encoder->ddc_bus); + if (dev_priv->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&dev_priv->lid_notifier); + drm_sysfs_connector_remove(connector); +@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector, + uint64_t value) + { + struct drm_device *dev = connector->dev; +- struct intel_encoder *intel_encoder = +- to_intel_encoder(connector); + + if (property == dev->mode_config.scaling_mode_property && + connector->encoder) { + struct drm_crtc *crtc = connector->encoder->crtc; ++ 
struct drm_encoder *encoder = connector->encoder; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; ++ + if (value == DRM_MODE_SCALE_NONE) { + DRM_DEBUG_KMS("no scaling not supported\n"); + return 0; +@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { + static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { + .get_modes = intel_lvds_get_modes, + .mode_valid = intel_lvds_mode_valid, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static const struct drm_connector_funcs intel_lvds_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_lvds_save, +- .restore = intel_lvds_restore, + .detect = intel_lvds_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_lvds_set_property, +@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { + + static void intel_lvds_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ ++ if (intel_encoder->ddc_bus) ++ intel_i2c_destroy(intel_encoder->ddc_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_lvds_enc_funcs = { +@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_display_mode *scan; /* *modes, *bios_mode; */ +@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev) + return; + } + +- connector = &intel_encoder->base; ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ ++ connector = 
&intel_connector->base; + encoder = &intel_encoder->enc; +- drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, ++ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + + drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, + DRM_MODE_ENCODER_LVDS); + +- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); ++ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); + intel_encoder->type = INTEL_OUTPUT_LVDS; + + intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); + intel_encoder->crtc_mask = (1 << 1); ++ if (IS_I965G(dev)) ++ intel_encoder->crtc_mask |= (1 << 0); + drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); + drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); + connector->display_info.subpixel_order = SubPixelHorizontalRGB; +@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev) + * the initial panel fitting mode will be FULL_SCREEN. 
+ */ + +- drm_connector_attach_property(&intel_encoder->base, ++ drm_connector_attach_property(&intel_connector->base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_FULLSCREEN); + lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; +@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev) + */ + dev_priv->lvds_edid_good = true; + +- if (!intel_ddc_get_modes(intel_encoder)) ++ if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) + dev_priv->lvds_edid_good = false; + + list_for_each_entry(scan, &connector->probed_modes, head) { +@@ -1151,4 +1093,5 @@ failed: + drm_connector_cleanup(connector); + drm_encoder_cleanup(encoder); + kfree(intel_encoder); ++ kfree(intel_connector); + } +diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c +index 8e5c83b..4b1fd3d 100644 +--- a/drivers/gpu/drm/i915/intel_modes.c ++++ b/drivers/gpu/drm/i915/intel_modes.c +@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) + } + }; + +- intel_i2c_quirk_set(intel_encoder->base.dev, true); ++ intel_i2c_quirk_set(intel_encoder->enc.dev, true); + ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); +- intel_i2c_quirk_set(intel_encoder->base.dev, false); ++ intel_i2c_quirk_set(intel_encoder->enc.dev, false); + if (ret == 2) + return true; + +@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) + /** + * intel_ddc_get_modes - get modelist from monitor + * @connector: DRM connector device to use ++ * @adapter: i2c adapter + * + * Fetch the EDID information from @connector using the DDC bus. 
+ */ +-int intel_ddc_get_modes(struct intel_encoder *intel_encoder) ++int intel_ddc_get_modes(struct drm_connector *connector, ++ struct i2c_adapter *adapter) + { + struct edid *edid; + int ret = 0; + +- intel_i2c_quirk_set(intel_encoder->base.dev, true); +- edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); +- intel_i2c_quirk_set(intel_encoder->base.dev, false); ++ intel_i2c_quirk_set(connector->dev, true); ++ edid = drm_get_edid(connector, adapter); ++ intel_i2c_quirk_set(connector->dev, false); + if (edid) { +- drm_mode_connector_update_edid_property(&intel_encoder->base, +- edid); +- ret = drm_add_edid_modes(&intel_encoder->base, edid); +- intel_encoder->base.display_info.raw_edid = NULL; ++ drm_mode_connector_update_edid_property(connector, edid); ++ ret = drm_add_edid_modes(connector, edid); ++ connector->display_info.raw_edid = NULL; + kfree(edid); + } + +diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c +index 6d524a1..d7ad513 100644 +--- a/drivers/gpu/drm/i915/intel_overlay.c ++++ b/drivers/gpu/drm/i915/intel_overlay.c +@@ -211,9 +211,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) + static int intel_overlay_on(struct intel_overlay *overlay) + { + struct drm_device *dev = overlay->dev; +- drm_i915_private_t *dev_priv = dev->dev_private; + int ret; +- RING_LOCALS; ++ drm_i915_private_t *dev_priv = dev->dev_private; + + BUG_ON(overlay->active); + +@@ -227,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay) + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + +- overlay->last_flip_req = i915_add_request(dev, NULL, 0); ++ overlay->last_flip_req = ++ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + +- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); ++ ret = i915_do_wait_request(dev, ++ overlay->last_flip_req, 1, &dev_priv->render_ring); + if (ret != 0) + return ret; + +@@ -248,7 +249,6 @@ 
static void intel_overlay_continue(struct intel_overlay *overlay, + drm_i915_private_t *dev_priv = dev->dev_private; + u32 flip_addr = overlay->flip_addr; + u32 tmp; +- RING_LOCALS; + + BUG_ON(!overlay->active); + +@@ -265,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay, + OUT_RING(flip_addr); + ADVANCE_LP_RING(); + +- overlay->last_flip_req = i915_add_request(dev, NULL, 0); ++ overlay->last_flip_req = ++ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + } + + static int intel_overlay_wait_flip(struct intel_overlay *overlay) +@@ -274,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + u32 tmp; +- RING_LOCALS; + + if (overlay->last_flip_req != 0) { +- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); ++ ret = i915_do_wait_request(dev, overlay->last_flip_req, ++ 1, &dev_priv->render_ring); + if (ret == 0) { + overlay->last_flip_req = 0; + +@@ -296,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + +- overlay->last_flip_req = i915_add_request(dev, NULL, 0); ++ overlay->last_flip_req = ++ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + +- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); ++ ret = i915_do_wait_request(dev, overlay->last_flip_req, ++ 1, &dev_priv->render_ring); + if (ret != 0) + return ret; + +@@ -314,9 +317,8 @@ static int intel_overlay_off(struct intel_overlay *overlay) + { + u32 flip_addr = overlay->flip_addr; + struct drm_device *dev = overlay->dev; +- drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_private_t *dev_priv = dev->dev_private; + int ret; +- RING_LOCALS; + + BUG_ON(!overlay->active); + +@@ -336,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay) + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + +- overlay->last_flip_req = 
i915_add_request(dev, NULL, 0); ++ overlay->last_flip_req = ++ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + +- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); ++ ret = i915_do_wait_request(dev, overlay->last_flip_req, ++ 1, &dev_priv->render_ring); + if (ret != 0) + return ret; + +@@ -354,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay) + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + +- overlay->last_flip_req = i915_add_request(dev, NULL, 0); ++ overlay->last_flip_req = ++ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + +- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); ++ ret = i915_do_wait_request(dev, overlay->last_flip_req, ++ 1, &dev_priv->render_ring); + if (ret != 0) + return ret; + +@@ -373,7 +379,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) + + /* never have the overlay hw on without showing a frame */ + BUG_ON(!overlay->vid_bo); +- obj = overlay->vid_bo->obj; ++ obj = &overlay->vid_bo->base; + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); +@@ -390,28 +396,29 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, + int interruptible) + { + struct drm_device *dev = overlay->dev; +- drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; ++ drm_i915_private_t *dev_priv = dev->dev_private; + u32 flip_addr; + int ret; +- RING_LOCALS; + + if (overlay->hw_wedged == HW_WEDGED) + return -EIO; + + if (overlay->last_flip_req == 0) { +- overlay->last_flip_req = i915_add_request(dev, NULL, 0); ++ overlay->last_flip_req = ++ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + } + +- ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible); ++ ret = i915_do_wait_request(dev, overlay->last_flip_req, ++ interruptible, &dev_priv->render_ring); 
+ if (ret != 0) + return ret; + + switch (overlay->hw_wedged) { + case RELEASE_OLD_VID: +- obj = overlay->old_vid_bo->obj; ++ obj = &overlay->old_vid_bo->base; + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + overlay->old_vid_bo = NULL; +@@ -429,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + +- overlay->last_flip_req = i915_add_request(dev, NULL, 0); ++ overlay->last_flip_req = i915_add_request(dev, NULL, ++ 0, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + + ret = i915_do_wait_request(dev, overlay->last_flip_req, +- interruptible); ++ interruptible, &dev_priv->render_ring); + if (ret != 0) + return ret; + +@@ -467,7 +475,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) + if (ret != 0) + return ret; + +- obj = overlay->old_vid_bo->obj; ++ obj = &overlay->old_vid_bo->base; + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + overlay->old_vid_bo = NULL; +@@ -1341,7 +1349,7 @@ void intel_setup_overlay(struct drm_device *dev) + return; + overlay->dev = dev; + +- reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); ++ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); + if (!reg_bo) + goto out_free; + overlay->reg_bo = to_intel_bo(reg_bo); +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c +new file mode 100644 +index 0000000..cea4f1a +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c +@@ -0,0 +1,849 @@ ++/* ++ * Copyright © 2008-2010 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * 
Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * Zou Nan hai ++ * Xiang Hai hao ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drv.h" ++#include "i915_drm.h" ++#include "i915_trace.h" ++ ++static void ++render_ring_flush(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ u32 invalidate_domains, ++ u32 flush_domains) ++{ ++#if WATCH_EXEC ++ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, ++ invalidate_domains, flush_domains); ++#endif ++ u32 cmd; ++ trace_i915_gem_request_flush(dev, ring->next_seqno, ++ invalidate_domains, flush_domains); ++ ++ if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { ++ /* ++ * read/write caches: ++ * ++ * I915_GEM_DOMAIN_RENDER is always invalidated, but is ++ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is ++ * also flushed at 2d versus 3d pipeline switches. ++ * ++ * read-only caches: ++ * ++ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if ++ * MI_READ_FLUSH is set, and is always flushed on 965. ++ * ++ * I915_GEM_DOMAIN_COMMAND may not exist? ++ * ++ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is ++ * invalidated when MI_EXE_FLUSH is set. 
++ * ++ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is ++ * invalidated with every MI_FLUSH. ++ * ++ * TLBs: ++ * ++ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND ++ * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and ++ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER ++ * are flushed at any MI_FLUSH. ++ */ ++ ++ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; ++ if ((invalidate_domains|flush_domains) & ++ I915_GEM_DOMAIN_RENDER) ++ cmd &= ~MI_NO_WRITE_FLUSH; ++ if (!IS_I965G(dev)) { ++ /* ++ * On the 965, the sampler cache always gets flushed ++ * and this bit is reserved. ++ */ ++ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) ++ cmd |= MI_READ_FLUSH; ++ } ++ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) ++ cmd |= MI_EXE_FLUSH; ++ ++#if WATCH_EXEC ++ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); ++#endif ++ intel_ring_begin(dev, ring, 8); ++ intel_ring_emit(dev, ring, cmd); ++ intel_ring_emit(dev, ring, MI_NOOP); ++ intel_ring_advance(dev, ring); ++ } ++} ++ ++static unsigned int render_ring_get_head(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ return I915_READ(PRB0_HEAD) & HEAD_ADDR; ++} ++ ++static unsigned int render_ring_get_tail(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ return I915_READ(PRB0_TAIL) & TAIL_ADDR; ++} ++ ++static unsigned int render_ring_get_active_head(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u32 acthd_reg = IS_I965G(dev) ? 
ACTHD_I965 : ACTHD; ++ ++ return I915_READ(acthd_reg); ++} ++ ++static void render_ring_advance_ring(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ I915_WRITE(PRB0_TAIL, ring->tail); ++} ++ ++static int init_ring_common(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ u32 head; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ obj_priv = to_intel_bo(ring->gem_object); ++ ++ /* Stop the ring if it's running. */ ++ I915_WRITE(ring->regs.ctl, 0); ++ I915_WRITE(ring->regs.head, 0); ++ I915_WRITE(ring->regs.tail, 0); ++ ++ /* Initialize the ring. */ ++ I915_WRITE(ring->regs.start, obj_priv->gtt_offset); ++ head = ring->get_head(dev, ring); ++ ++ /* G45 ring initialization fails to reset head to zero */ ++ if (head != 0) { ++ DRM_ERROR("%s head not reset to zero " ++ "ctl %08x head %08x tail %08x start %08x\n", ++ ring->name, ++ I915_READ(ring->regs.ctl), ++ I915_READ(ring->regs.head), ++ I915_READ(ring->regs.tail), ++ I915_READ(ring->regs.start)); ++ ++ I915_WRITE(ring->regs.head, 0); ++ ++ DRM_ERROR("%s head forced to zero " ++ "ctl %08x head %08x tail %08x start %08x\n", ++ ring->name, ++ I915_READ(ring->regs.ctl), ++ I915_READ(ring->regs.head), ++ I915_READ(ring->regs.tail), ++ I915_READ(ring->regs.start)); ++ } ++ ++ I915_WRITE(ring->regs.ctl, ++ ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) ++ | RING_NO_REPORT | RING_VALID); ++ ++ head = I915_READ(ring->regs.head) & HEAD_ADDR; ++ /* If the head is still not zero, the ring is dead */ ++ if (head != 0) { ++ DRM_ERROR("%s initialization failed " ++ "ctl %08x head %08x tail %08x start %08x\n", ++ ring->name, ++ I915_READ(ring->regs.ctl), ++ I915_READ(ring->regs.head), ++ I915_READ(ring->regs.tail), ++ I915_READ(ring->regs.start)); ++ return -EIO; ++ } ++ ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ i915_kernel_lost_context(dev); ++ else { ++ ring->head = 
ring->get_head(dev, ring); ++ ring->tail = ring->get_tail(dev, ring); ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->size; ++ } ++ return 0; ++} ++ ++static int init_render_ring(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret = init_ring_common(dev, ring); ++ if (IS_I9XX(dev) && !IS_GEN3(dev)) { ++ I915_WRITE(MI_MODE, ++ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); ++ } ++ return ret; ++} ++ ++#define PIPE_CONTROL_FLUSH(addr) \ ++do { \ ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ ++ PIPE_CONTROL_DEPTH_STALL | 2); \ ++ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ ++ OUT_RING(0); \ ++ OUT_RING(0); \ ++} while (0) ++ ++/** ++ * Creates a new sequence number, emitting a write of it to the status page ++ * plus an interrupt, which will trigger i915_user_interrupt_handler. ++ * ++ * Must be called with struct_lock held. ++ * ++ * Returned sequence numbers are nonzero on success. ++ */ ++static u32 ++render_ring_add_request(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ struct drm_file *file_priv, ++ u32 flush_domains) ++{ ++ u32 seqno; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ seqno = intel_ring_get_seqno(dev, ring); ++ ++ if (IS_GEN6(dev)) { ++ BEGIN_LP_RING(6); ++ OUT_RING(GFX_OP_PIPE_CONTROL | 3); ++ OUT_RING(PIPE_CONTROL_QW_WRITE | ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | ++ PIPE_CONTROL_NOTIFY); ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); ++ OUT_RING(seqno); ++ OUT_RING(0); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } else if (HAS_PIPE_CONTROL(dev)) { ++ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; ++ ++ /* ++ * Workaround qword write incoherence by flushing the ++ * PIPE_NOTIFY buffers out to memory before requesting ++ * an interrupt. 
++ */ ++ BEGIN_LP_RING(32); ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); ++ OUT_RING(seqno); ++ OUT_RING(0); ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; /* write to separate cachelines */ ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | ++ PIPE_CONTROL_NOTIFY); ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); ++ OUT_RING(seqno); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_STORE_DWORD_INDEX); ++ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); ++ OUT_RING(seqno); ++ ++ OUT_RING(MI_USER_INTERRUPT); ++ ADVANCE_LP_RING(); ++ } ++ return seqno; ++} ++ ++static u32 ++render_ring_get_gem_seqno(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ if (HAS_PIPE_CONTROL(dev)) ++ return ((volatile u32 *)(dev_priv->seqno_page))[0]; ++ else ++ return intel_read_status_page(ring, I915_GEM_HWS_INDEX); ++} ++ ++static void ++render_ring_get_user_irq(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ unsigned long irqflags; ++ ++ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); ++ if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { ++ if (HAS_PCH_SPLIT(dev)) ++ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); ++ else ++ i915_enable_irq(dev_priv, I915_USER_INTERRUPT); ++ } ++ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); ++} ++ 
++static void ++render_ring_put_user_irq(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ unsigned long irqflags; ++ ++ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); ++ BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); ++ if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { ++ if (HAS_PCH_SPLIT(dev)) ++ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); ++ else ++ i915_disable_irq(dev_priv, I915_USER_INTERRUPT); ++ } ++ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); ++} ++ ++static void render_setup_status_page(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ if (IS_GEN6(dev)) { ++ I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); ++ I915_READ(HWS_PGA_GEN6); /* posting read */ ++ } else { ++ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); ++ I915_READ(HWS_PGA); /* posting read */ ++ } ++ ++} ++ ++void ++bsd_ring_flush(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ u32 invalidate_domains, ++ u32 flush_domains) ++{ ++ intel_ring_begin(dev, ring, 8); ++ intel_ring_emit(dev, ring, MI_FLUSH); ++ intel_ring_emit(dev, ring, MI_NOOP); ++ intel_ring_advance(dev, ring); ++} ++ ++static inline unsigned int bsd_ring_get_head(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; ++} ++ ++static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; ++} ++ ++static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ return I915_READ(BSD_RING_ACTHD); ++} ++ ++static inline void 
bsd_ring_advance_ring(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ I915_WRITE(BSD_RING_TAIL, ring->tail); ++} ++ ++static int init_bsd_ring(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ return init_ring_common(dev, ring); ++} ++ ++static u32 ++bsd_ring_add_request(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ struct drm_file *file_priv, ++ u32 flush_domains) ++{ ++ u32 seqno; ++ seqno = intel_ring_get_seqno(dev, ring); ++ intel_ring_begin(dev, ring, 4); ++ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); ++ intel_ring_emit(dev, ring, ++ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); ++ intel_ring_emit(dev, ring, seqno); ++ intel_ring_emit(dev, ring, MI_USER_INTERRUPT); ++ intel_ring_advance(dev, ring); ++ ++ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); ++ ++ return seqno; ++} ++ ++static void bsd_setup_status_page(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); ++ I915_READ(BSD_HWS_PGA); ++} ++ ++static void ++bsd_ring_get_user_irq(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ /* do nothing */ ++} ++static void ++bsd_ring_put_user_irq(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ /* do nothing */ ++} ++ ++static u32 ++bsd_ring_get_gem_seqno(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ return intel_read_status_page(ring, I915_GEM_HWS_INDEX); ++} ++ ++static int ++bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ struct drm_i915_gem_execbuffer2 *exec, ++ struct drm_clip_rect *cliprects, ++ uint64_t exec_offset) ++{ ++ uint32_t exec_start; ++ exec_start = (uint32_t) exec_offset + exec->batch_start_offset; ++ intel_ring_begin(dev, ring, 2); ++ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | ++ (2 << 6) | 
MI_BATCH_NON_SECURE_I965); ++ intel_ring_emit(dev, ring, exec_start); ++ intel_ring_advance(dev, ring); ++ return 0; ++} ++ ++ ++static int ++render_ring_dispatch_gem_execbuffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ struct drm_i915_gem_execbuffer2 *exec, ++ struct drm_clip_rect *cliprects, ++ uint64_t exec_offset) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int nbox = exec->num_cliprects; ++ int i = 0, count; ++ uint32_t exec_start, exec_len; ++ exec_start = (uint32_t) exec_offset + exec->batch_start_offset; ++ exec_len = (uint32_t) exec->batch_len; ++ ++ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); ++ ++ count = nbox ? nbox : 1; ++ ++ for (i = 0; i < count; i++) { ++ if (i < nbox) { ++ int ret = i915_emit_box(dev, cliprects, i, ++ exec->DR1, exec->DR4); ++ if (ret) ++ return ret; ++ } ++ ++ if (IS_I830(dev) || IS_845G(dev)) { ++ intel_ring_begin(dev, ring, 4); ++ intel_ring_emit(dev, ring, MI_BATCH_BUFFER); ++ intel_ring_emit(dev, ring, ++ exec_start | MI_BATCH_NON_SECURE); ++ intel_ring_emit(dev, ring, exec_start + exec_len - 4); ++ intel_ring_emit(dev, ring, 0); ++ } else { ++ intel_ring_begin(dev, ring, 4); ++ if (IS_I965G(dev)) { ++ intel_ring_emit(dev, ring, ++ MI_BATCH_BUFFER_START | (2 << 6) ++ | MI_BATCH_NON_SECURE_I965); ++ intel_ring_emit(dev, ring, exec_start); ++ } else { ++ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START ++ | (2 << 6)); ++ intel_ring_emit(dev, ring, exec_start | ++ MI_BATCH_NON_SECURE); ++ } ++ } ++ intel_ring_advance(dev, ring); ++ } ++ ++ /* XXX breadcrumb */ ++ return 0; ++} ++ ++static void cleanup_status_page(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj = ring->status_page.obj; ++ if (obj == NULL) ++ return; ++ obj_priv = to_intel_bo(obj); ++ ++ kunmap(obj_priv->pages[0]); ++ i915_gem_object_unpin(obj); ++ 
drm_gem_object_unreference(obj); ++ ring->status_page.obj = NULL; ++ ++ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); ++} ++ ++static int init_status_page(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ obj = i915_gem_alloc_object(dev, 4096); ++ if (obj == NULL) { ++ DRM_ERROR("Failed to allocate status page\n"); ++ ret = -ENOMEM; ++ goto err; ++ } ++ obj_priv = to_intel_bo(obj); ++ obj_priv->agp_type = AGP_USER_CACHED_MEMORY; ++ ++ ret = i915_gem_object_pin(obj, 4096); ++ if (ret != 0) { ++ goto err_unref; ++ } ++ ++ ring->status_page.gfx_addr = obj_priv->gtt_offset; ++ ring->status_page.page_addr = kmap(obj_priv->pages[0]); ++ if (ring->status_page.page_addr == NULL) { ++ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); ++ goto err_unpin; ++ } ++ ring->status_page.obj = obj; ++ memset(ring->status_page.page_addr, 0, PAGE_SIZE); ++ ++ ring->setup_status_page(dev, ring); ++ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ++ ring->name, ring->status_page.gfx_addr); ++ ++ return 0; ++ ++err_unpin: ++ i915_gem_object_unpin(obj); ++err_unref: ++ drm_gem_object_unreference(obj); ++err: ++ return ret; ++} ++ ++ ++int intel_init_ring_buffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ int ret; ++ struct drm_i915_gem_object *obj_priv; ++ struct drm_gem_object *obj; ++ ring->dev = dev; ++ ++ if (I915_NEED_GFX_HWS(dev)) { ++ ret = init_status_page(dev, ring); ++ if (ret) ++ return ret; ++ } ++ ++ obj = i915_gem_alloc_object(dev, ring->size); ++ if (obj == NULL) { ++ DRM_ERROR("Failed to allocate ringbuffer\n"); ++ ret = -ENOMEM; ++ goto cleanup; ++ } ++ ++ ring->gem_object = obj; ++ ++ ret = i915_gem_object_pin(obj, ring->alignment); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ goto cleanup; ++ } ++ ++ obj_priv = to_intel_bo(obj); ++ ring->map.size = ring->size; ++ 
ring->map.offset = dev->agp->base + obj_priv->gtt_offset; ++ ring->map.type = 0; ++ ring->map.flags = 0; ++ ring->map.mtrr = 0; ++ ++ drm_core_ioremap_wc(&ring->map, dev); ++ if (ring->map.handle == NULL) { ++ DRM_ERROR("Failed to map ringbuffer.\n"); ++ i915_gem_object_unpin(obj); ++ drm_gem_object_unreference(obj); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ ++ ring->virtual_start = ring->map.handle; ++ ret = ring->init(dev, ring); ++ if (ret != 0) { ++ intel_cleanup_ring_buffer(dev, ring); ++ return ret; ++ } ++ ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ i915_kernel_lost_context(dev); ++ else { ++ ring->head = ring->get_head(dev, ring); ++ ring->tail = ring->get_tail(dev, ring); ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->size; ++ } ++ INIT_LIST_HEAD(&ring->active_list); ++ INIT_LIST_HEAD(&ring->request_list); ++ return ret; ++cleanup: ++ cleanup_status_page(dev, ring); ++ return ret; ++} ++ ++void intel_cleanup_ring_buffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ if (ring->gem_object == NULL) ++ return; ++ ++ drm_core_ioremapfree(&ring->map, dev); ++ ++ i915_gem_object_unpin(ring->gem_object); ++ drm_gem_object_unreference(ring->gem_object); ++ ring->gem_object = NULL; ++ cleanup_status_page(dev, ring); ++} ++ ++int intel_wrap_ring_buffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ unsigned int *virt; ++ int rem; ++ rem = ring->size - ring->tail; ++ ++ if (ring->space < rem) { ++ int ret = intel_wait_ring_buffer(dev, ring, rem); ++ if (ret) ++ return ret; ++ } ++ ++ virt = (unsigned int *)(ring->virtual_start + ring->tail); ++ rem /= 4; ++ while (rem--) ++ *virt++ = MI_NOOP; ++ ++ ring->tail = 0; ++ ++ return 0; ++} ++ ++int intel_wait_ring_buffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring, int n) ++{ ++ unsigned long end; ++ ++ trace_i915_ring_wait_begin (dev); ++ end = jiffies + 3 * HZ; ++ do { ++ ring->head = ring->get_head(dev, 
ring); ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->size; ++ if (ring->space >= n) { ++ trace_i915_ring_wait_end (dev); ++ return 0; ++ } ++ ++ if (dev->primary->master) { ++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; ++ if (master_priv->sarea_priv) ++ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; ++ } ++ ++ yield(); ++ } while (!time_after(jiffies, end)); ++ trace_i915_ring_wait_end (dev); ++ return -EBUSY; ++} ++ ++void intel_ring_begin(struct drm_device *dev, ++ struct intel_ring_buffer *ring, int n) ++{ ++ if (unlikely(ring->tail + n > ring->size)) ++ intel_wrap_ring_buffer(dev, ring); ++ if (unlikely(ring->space < n)) ++ intel_wait_ring_buffer(dev, ring, n); ++} ++ ++void intel_ring_emit(struct drm_device *dev, ++ struct intel_ring_buffer *ring, unsigned int data) ++{ ++ unsigned int *virt = ring->virtual_start + ring->tail; ++ *virt = data; ++ ring->tail += 4; ++ ring->tail &= ring->size - 1; ++ ring->space -= 4; ++} ++ ++void intel_ring_advance(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ ring->advance_ring(dev, ring); ++} ++ ++void intel_fill_struct(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ void *data, ++ unsigned int len) ++{ ++ unsigned int *virt = ring->virtual_start + ring->tail; ++ BUG_ON((len&~(4-1)) != 0); ++ intel_ring_begin(dev, ring, len); ++ memcpy(virt, data, len); ++ ring->tail += len; ++ ring->tail &= ring->size - 1; ++ ring->space -= len; ++ intel_ring_advance(dev, ring); ++} ++ ++u32 intel_ring_get_seqno(struct drm_device *dev, ++ struct intel_ring_buffer *ring) ++{ ++ u32 seqno; ++ seqno = ring->next_seqno; ++ ++ /* reserve 0 for non-seqno */ ++ if (++ring->next_seqno == 0) ++ ring->next_seqno = 1; ++ return seqno; ++} ++ ++struct intel_ring_buffer render_ring = { ++ .name = "render ring", ++ .regs = { ++ .ctl = PRB0_CTL, ++ .head = PRB0_HEAD, ++ .tail = PRB0_TAIL, ++ .start = PRB0_START ++ }, ++ 
.ring_flag = I915_EXEC_RENDER, ++ .size = 32 * PAGE_SIZE, ++ .alignment = PAGE_SIZE, ++ .virtual_start = NULL, ++ .dev = NULL, ++ .gem_object = NULL, ++ .head = 0, ++ .tail = 0, ++ .space = 0, ++ .next_seqno = 1, ++ .user_irq_refcount = 0, ++ .irq_gem_seqno = 0, ++ .waiting_gem_seqno = 0, ++ .setup_status_page = render_setup_status_page, ++ .init = init_render_ring, ++ .get_head = render_ring_get_head, ++ .get_tail = render_ring_get_tail, ++ .get_active_head = render_ring_get_active_head, ++ .advance_ring = render_ring_advance_ring, ++ .flush = render_ring_flush, ++ .add_request = render_ring_add_request, ++ .get_gem_seqno = render_ring_get_gem_seqno, ++ .user_irq_get = render_ring_get_user_irq, ++ .user_irq_put = render_ring_put_user_irq, ++ .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, ++ .status_page = {NULL, 0, NULL}, ++ .map = {0,} ++}; ++ ++/* ring buffer for bit-stream decoder */ ++ ++struct intel_ring_buffer bsd_ring = { ++ .name = "bsd ring", ++ .regs = { ++ .ctl = BSD_RING_CTL, ++ .head = BSD_RING_HEAD, ++ .tail = BSD_RING_TAIL, ++ .start = BSD_RING_START ++ }, ++ .ring_flag = I915_EXEC_BSD, ++ .size = 32 * PAGE_SIZE, ++ .alignment = PAGE_SIZE, ++ .virtual_start = NULL, ++ .dev = NULL, ++ .gem_object = NULL, ++ .head = 0, ++ .tail = 0, ++ .space = 0, ++ .next_seqno = 1, ++ .user_irq_refcount = 0, ++ .irq_gem_seqno = 0, ++ .waiting_gem_seqno = 0, ++ .setup_status_page = bsd_setup_status_page, ++ .init = init_bsd_ring, ++ .get_head = bsd_ring_get_head, ++ .get_tail = bsd_ring_get_tail, ++ .get_active_head = bsd_ring_get_active_head, ++ .advance_ring = bsd_ring_advance_ring, ++ .flush = bsd_ring_flush, ++ .add_request = bsd_ring_add_request, ++ .get_gem_seqno = bsd_ring_get_gem_seqno, ++ .user_irq_get = bsd_ring_get_user_irq, ++ .user_irq_put = bsd_ring_put_user_irq, ++ .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, ++ .status_page = {NULL, 0, NULL}, ++ .map = {0,} ++}; +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
b/drivers/gpu/drm/i915/intel_ringbuffer.h +new file mode 100644 +index 0000000..d5568d3 +--- /dev/null ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h +@@ -0,0 +1,124 @@ ++#ifndef _INTEL_RINGBUFFER_H_ ++#define _INTEL_RINGBUFFER_H_ ++ ++struct intel_hw_status_page { ++ void *page_addr; ++ unsigned int gfx_addr; ++ struct drm_gem_object *obj; ++}; ++ ++struct drm_i915_gem_execbuffer2; ++struct intel_ring_buffer { ++ const char *name; ++ struct ring_regs { ++ u32 ctl; ++ u32 head; ++ u32 tail; ++ u32 start; ++ } regs; ++ unsigned int ring_flag; ++ unsigned long size; ++ unsigned int alignment; ++ void *virtual_start; ++ struct drm_device *dev; ++ struct drm_gem_object *gem_object; ++ ++ unsigned int head; ++ unsigned int tail; ++ unsigned int space; ++ u32 next_seqno; ++ struct intel_hw_status_page status_page; ++ ++ u32 irq_gem_seqno; /* last seq seem at irq time */ ++ u32 waiting_gem_seqno; ++ int user_irq_refcount; ++ void (*user_irq_get)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ void (*user_irq_put)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ void (*setup_status_page)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ ++ int (*init)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ ++ unsigned int (*get_head)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ unsigned int (*get_tail)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ unsigned int (*get_active_head)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ void (*advance_ring)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ void (*flush)(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ u32 invalidate_domains, ++ u32 flush_domains); ++ u32 (*add_request)(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ struct drm_file *file_priv, ++ u32 flush_domains); ++ u32 (*get_gem_seqno)(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ int 
(*dispatch_gem_execbuffer)(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ struct drm_i915_gem_execbuffer2 *exec, ++ struct drm_clip_rect *cliprects, ++ uint64_t exec_offset); ++ ++ /** ++ * List of objects currently involved in rendering from the ++ * ringbuffer. ++ * ++ * Includes buffers having the contents of their GPU caches ++ * flushed, not necessarily primitives. last_rendering_seqno ++ * represents when the rendering involved will be completed. ++ * ++ * A reference is held on the buffer while on this list. ++ */ ++ struct list_head active_list; ++ ++ /** ++ * List of breadcrumbs associated with GPU requests currently ++ * outstanding. ++ */ ++ struct list_head request_list; ++ ++ wait_queue_head_t irq_queue; ++ drm_local_map_t map; ++}; ++ ++static inline u32 ++intel_read_status_page(struct intel_ring_buffer *ring, ++ int reg) ++{ ++ u32 *regs = ring->status_page.page_addr; ++ return regs[reg]; ++} ++ ++int intel_init_ring_buffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++void intel_cleanup_ring_buffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++int intel_wait_ring_buffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring, int n); ++int intel_wrap_ring_buffer(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++void intel_ring_begin(struct drm_device *dev, ++ struct intel_ring_buffer *ring, int n); ++void intel_ring_emit(struct drm_device *dev, ++ struct intel_ring_buffer *ring, u32 data); ++void intel_fill_struct(struct drm_device *dev, ++ struct intel_ring_buffer *ring, ++ void *data, ++ unsigned int len); ++void intel_ring_advance(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ ++u32 intel_ring_get_seqno(struct drm_device *dev, ++ struct intel_ring_buffer *ring); ++ ++extern struct intel_ring_buffer render_ring; ++extern struct intel_ring_buffer bsd_ring; ++ ++#endif /* _INTEL_RINGBUFFER_H_ */ +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c 
b/drivers/gpu/drm/i915/intel_sdvo.c +index 87d9536..76993ac 100644 +--- a/drivers/gpu/drm/i915/intel_sdvo.c ++++ b/drivers/gpu/drm/i915/intel_sdvo.c +@@ -36,7 +36,18 @@ + #include "i915_drm.h" + #include "i915_drv.h" + #include "intel_sdvo_regs.h" +-#include ++ ++#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) ++#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) ++#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) ++#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) ++ ++#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ ++ SDVO_TV_MASK) ++ ++#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) ++#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) ++ + + static char *tv_format_names[] = { + "NTSC_M" , "NTSC_J" , "NTSC_443", +@@ -86,12 +97,6 @@ struct intel_sdvo_priv { + /* This is for current tv format name */ + char *tv_format_name; + +- /* This contains all current supported TV format */ +- char *tv_format_supported[TV_FORMAT_NUM]; +- int format_supported_num; +- struct drm_property *tv_format_property; +- struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; +- + /** + * This is set if we treat the device as HDMI, instead of DVI. + */ +@@ -112,12 +117,6 @@ struct intel_sdvo_priv { + */ + struct drm_display_mode *sdvo_lvds_fixed_mode; + +- /** +- * Returned SDTV resolutions allowed for the current format, if the +- * device reported it. 
+- */ +- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; +- + /* + * supported encoding mode, used to determine whether HDMI is + * supported +@@ -130,11 +129,24 @@ struct intel_sdvo_priv { + /* Mac mini hack -- use the same DDC as the analog connector */ + struct i2c_adapter *analog_ddc_bus; + +- int save_sdvo_mult; +- u16 save_active_outputs; +- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; +- struct intel_sdvo_dtd save_output_dtd[16]; +- u32 save_SDVOX; ++}; ++ ++struct intel_sdvo_connector { ++ /* Mark the type of connector */ ++ uint16_t output_flag; ++ ++ /* This contains all current supported TV format */ ++ char *tv_format_supported[TV_FORMAT_NUM]; ++ int format_supported_num; ++ struct drm_property *tv_format_property; ++ struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; ++ ++ /** ++ * Returned SDTV resolutions allowed for the current format, if the ++ * device reported it. ++ */ ++ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; ++ + /* add the property for the SDVO-TV */ + struct drm_property *left_property; + struct drm_property *right_property; +@@ -162,7 +174,12 @@ struct intel_sdvo_priv { + }; + + static bool +-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); ++intel_sdvo_output_setup(struct intel_encoder *intel_encoder, ++ uint16_t flags); ++static void ++intel_sdvo_tv_create_property(struct drm_connector *connector, int type); ++static void ++intel_sdvo_create_enhance_property(struct drm_connector *connector); + + /** + * Writes the SDVOB or SDVOC with the given value, but always writes both +@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); + */ + static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) + { +- struct drm_device *dev = intel_encoder->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_sdvo_priv *sdvo_priv = 
intel_encoder->dev_priv; + u32 bval = val, cval = val; + int i; + ++ if (sdvo_priv->sdvo_reg == PCH_SDVOB) { ++ I915_WRITE(sdvo_priv->sdvo_reg, val); ++ I915_READ(sdvo_priv->sdvo_reg); ++ return; ++ } ++ + if (sdvo_priv->sdvo_reg == SDVOB) { + cval = I915_READ(SDVOC); + } else { +@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name { + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), + }; + +-#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC") ++#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) ++#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC") + #define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) + + static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, +@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b + return true; + } + +-static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder, +- u16 *outputs) +-{ +- u8 status; +- +- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); +- status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs)); +- +- return (status == SDVO_CMD_STATUS_SUCCESS); +-} +- + static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, + u16 outputs) + { +@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, + return (status == SDVO_CMD_STATUS_SUCCESS); + } + +-static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd, +- struct intel_sdvo_dtd *dtd) +-{ +- u8 status; +- +- intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0); +- status = intel_sdvo_read_response(intel_encoder, &dtd->part1, +- sizeof(dtd->part1)); +- if (status != SDVO_CMD_STATUS_SUCCESS) +- return false; +- +- intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0); +- status = intel_sdvo_read_response(intel_encoder, &dtd->part2, +- sizeof(dtd->part2)); +- if (status != 
SDVO_CMD_STATUS_SUCCESS) +- return false; +- +- return true; +-} +- +-static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder, +- struct intel_sdvo_dtd *dtd) +-{ +- return intel_sdvo_get_timing(intel_encoder, +- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); +-} +- +-static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder, +- struct intel_sdvo_dtd *dtd) +-{ +- return intel_sdvo_get_timing(intel_encoder, +- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); +-} +- + static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, + struct intel_sdvo_dtd *dtd) + { +@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en + return false; + } + +-static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder) +-{ +- u8 response, status; +- +- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); +- status = intel_sdvo_read_response(intel_encoder, &response, 1); +- +- if (status != SDVO_CMD_STATUS_SUCCESS) { +- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); +- return SDVO_CLOCK_RATE_MULT_1X; +- } else { +- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response); +- } +- +- return response; +-} +- + static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) + { + u8 status; +@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) + memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 
+ sizeof(format) : sizeof(format_map)); + +- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format, + sizeof(format)); + + status = intel_sdvo_read_response(intel_encoder, NULL, 0); +@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, + /* Set output timings */ + intel_sdvo_get_dtd_from_mode(&output_dtd, mode); + intel_sdvo_set_target_output(intel_encoder, +- dev_priv->controlled_output); ++ dev_priv->attached_output); + intel_sdvo_set_output_timing(intel_encoder, &output_dtd); + + /* Set the input timing to the screen. Assume always input 0. */ +@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, + dev_priv->sdvo_lvds_fixed_mode); + + intel_sdvo_set_target_output(intel_encoder, +- dev_priv->controlled_output); ++ dev_priv->attached_output); + intel_sdvo_set_output_timing(intel_encoder, &output_dtd); + + /* Set the input timing to the screen. Assume always input 0. */ +@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, + * channel on the motherboard. In a two-input device, the first input + * will be SDVOB and the second SDVOC. 
+ */ +- in_out.in0 = sdvo_priv->controlled_output; ++ in_out.in0 = sdvo_priv->attached_output; + in_out.in1 = 0; + + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, +@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, + if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { + /* Set the output timing to the screen */ + intel_sdvo_set_target_output(intel_encoder, +- sdvo_priv->controlled_output); ++ sdvo_priv->attached_output); + intel_sdvo_set_output_timing(intel_encoder, &input_dtd); + } + +@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) + + if (0) + intel_sdvo_set_encoder_power_state(intel_encoder, mode); +- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); ++ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output); + } + return; + } + +-static void intel_sdvo_save(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; +- int o; +- +- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder); +- intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs); +- +- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { +- intel_sdvo_set_target_input(intel_encoder, true, false); +- intel_sdvo_get_input_timing(intel_encoder, +- &sdvo_priv->save_input_dtd_1); +- } +- +- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { +- intel_sdvo_set_target_input(intel_encoder, false, true); +- intel_sdvo_get_input_timing(intel_encoder, +- &sdvo_priv->save_input_dtd_2); +- } +- +- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) +- { +- u16 this_output = (1 << o); +- if (sdvo_priv->caps.output_flags & this_output) +- { +- intel_sdvo_set_target_output(intel_encoder, this_output); +- 
intel_sdvo_get_output_timing(intel_encoder, +- &sdvo_priv->save_output_dtd[o]); +- } +- } +- if (sdvo_priv->is_tv) { +- /* XXX: Save TV format/enhancements. */ +- } +- +- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg); +-} +- +-static void intel_sdvo_restore(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; +- int o; +- int i; +- bool input1, input2; +- u8 status; +- +- intel_sdvo_set_active_outputs(intel_encoder, 0); +- +- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) +- { +- u16 this_output = (1 << o); +- if (sdvo_priv->caps.output_flags & this_output) { +- intel_sdvo_set_target_output(intel_encoder, this_output); +- intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]); +- } +- } +- +- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { +- intel_sdvo_set_target_input(intel_encoder, true, false); +- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1); +- } +- +- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { +- intel_sdvo_set_target_input(intel_encoder, false, true); +- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2); +- } +- +- intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult); +- +- if (sdvo_priv->is_tv) { +- /* XXX: Restore TV format/enhancements. 
*/ +- } +- +- intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX); +- +- if (sdvo_priv->save_SDVOX & SDVO_ENABLE) +- { +- for (i = 0; i < 2; i++) +- intel_wait_for_vblank(dev); +- status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); +- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) +- DRM_DEBUG_KMS("First %s output reported failure to " +- "sync\n", SDVO_NAME(sdvo_priv)); +- } +- +- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs); +-} +- + static int intel_sdvo_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) +@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str + return true; + } + ++/* No use! 
*/ ++#if 0 + struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) + { + struct drm_connector *connector = NULL; +@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); + intel_sdvo_read_response(intel_encoder, &response, 2); + } ++#endif + + static bool + intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) +@@ -1598,12 +1472,17 @@ static struct drm_connector * + intel_find_analog_connector(struct drm_device *dev) + { + struct drm_connector *connector; ++ struct drm_encoder *encoder; + struct intel_encoder *intel_encoder; + +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- intel_encoder = to_intel_encoder(connector); +- if (intel_encoder->type == INTEL_OUTPUT_ANALOG) +- return connector; ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ intel_encoder = enc_to_intel_encoder(encoder); ++ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (encoder == intel_attached_encoder(connector)) ++ return connector; ++ } ++ } + } + return NULL; + } +@@ -1625,15 +1504,17 @@ intel_analog_is_connected(struct drm_device *dev) + } + + enum drm_connector_status +-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) ++intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + enum drm_connector_status status = connector_status_connected; + struct edid *edid = 
NULL; + +- edid = drm_get_edid(&intel_encoder->base, +- intel_encoder->ddc_bus); ++ edid = drm_get_edid(connector, intel_encoder->ddc_bus); + + /* This is only applied to SDVO cards with multiple outputs */ + if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { +@@ -1646,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) + */ + while(temp_ddc > 1) { + sdvo_priv->ddc_bus = temp_ddc; +- edid = drm_get_edid(&intel_encoder->base, +- intel_encoder->ddc_bus); ++ edid = drm_get_edid(connector, intel_encoder->ddc_bus); + if (edid) { + /* + * When we can get the EDID, maybe it is the +@@ -1664,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) + /* when there is no edid and no monitor is connected with VGA + * port, try to use the CRT ddc to read the EDID for DVI-connector + */ +- if (edid == NULL && +- sdvo_priv->analog_ddc_bus && +- !intel_analog_is_connected(intel_encoder->base.dev)) +- edid = drm_get_edid(&intel_encoder->base, +- sdvo_priv->analog_ddc_bus); ++ if (edid == NULL && sdvo_priv->analog_ddc_bus && ++ !intel_analog_is_connected(connector->dev)) ++ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus); ++ + if (edid != NULL) { +- /* Don't report the output as connected if it's a DVI-I +- * connector with a non-digital EDID coming out. 
+- */ +- if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { +- if (edid->input & DRM_EDID_INPUT_DIGITAL) +- sdvo_priv->is_hdmi = +- drm_detect_hdmi_monitor(edid); +- else +- status = connector_status_disconnected; +- } ++ bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); ++ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK); + +- kfree(edid); +- intel_encoder->base.display_info.raw_edid = NULL; ++ /* DDC bus is shared, match EDID to connector type */ ++ if (is_digital && need_digital) ++ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); ++ else if (is_digital != need_digital) ++ status = connector_status_disconnected; + +- } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) ++ connector->display_info.raw_edid = NULL; ++ } else + status = connector_status_disconnected; ++ ++ kfree(edid); + + return status; + } +@@ -1694,8 +1571,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect + { + uint16_t response; + u8 status; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; ++ enum drm_connector_status ret; + + intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); +@@ -1713,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect + if (response == 0) + return connector_status_disconnected; + +- if (intel_sdvo_multifunc_encoder(intel_encoder) && +- sdvo_priv->attached_output != response) { +- if (sdvo_priv->controlled_output != response && +- intel_sdvo_output_setup(intel_encoder, response) != true) +- return connector_status_unknown; +- 
sdvo_priv->attached_output = response; ++ sdvo_priv->attached_output = response; ++ ++ if ((sdvo_connector->output_flag & response) == 0) ++ ret = connector_status_disconnected; ++ else if (response & SDVO_TMDS_MASK) ++ ret = intel_sdvo_hdmi_sink_detect(connector); ++ else ++ ret = connector_status_connected; ++ ++ /* May update encoder flag for like clock for SDVO TV, etc.*/ ++ if (ret == connector_status_connected) { ++ sdvo_priv->is_tv = false; ++ sdvo_priv->is_lvds = false; ++ intel_encoder->needs_tv_clock = false; ++ ++ if (response & SDVO_TV_MASK) { ++ sdvo_priv->is_tv = true; ++ intel_encoder->needs_tv_clock = true; ++ } ++ if (response & SDVO_LVDS_MASK) ++ sdvo_priv->is_lvds = true; + } +- return intel_sdvo_hdmi_sink_detect(connector, response); ++ ++ return ret; + } + + static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + int num_modes; + + /* set the bus switch and get the modes */ +- num_modes = intel_ddc_get_modes(intel_encoder); ++ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + + /* + * Mac mini hack. 
On this device, the DVI-I connector shares one DDC +@@ -1740,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) + */ + if (num_modes == 0 && + sdvo_priv->analog_ddc_bus && +- !intel_analog_is_connected(intel_encoder->base.dev)) { +- struct i2c_adapter *digital_ddc_bus; +- ++ !intel_analog_is_connected(connector->dev)) { + /* Switch to the analog ddc bus and try that + */ +- digital_ddc_bus = intel_encoder->ddc_bus; +- intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus; +- +- (void) intel_ddc_get_modes(intel_encoder); +- +- intel_encoder->ddc_bus = digital_ddc_bus; ++ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus); + } + } + +@@ -1821,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = { + + static void intel_sdvo_get_tv_modes(struct drm_connector *connector) + { +- struct intel_encoder *output = to_intel_encoder(connector); +- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo_sdtv_resolution_request tv_res; + uint32_t reply = 0, format_map = 0; + int i; +@@ -1842,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) + sizeof(format_map) ? 
sizeof(format_map) : + sizeof(struct intel_sdvo_sdtv_resolution_request)); + +- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); ++ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output); + +- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, + &tv_res, sizeof(tv_res)); +- status = intel_sdvo_read_response(output, &reply, 3); ++ status = intel_sdvo_read_response(intel_encoder, &reply, 3); + if (status != SDVO_CMD_STATUS_SUCCESS) + return; + +@@ -1863,7 +1755,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) + + static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_display_mode *newmode; +@@ -1873,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) + * Assume that the preferred modes are + * arranged in priority order. 
+ */ +- intel_ddc_get_modes(intel_encoder); ++ intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + if (list_empty(&connector->probed_modes) == false) + goto end; + +@@ -1902,12 +1795,12 @@ end: + + static int intel_sdvo_get_modes(struct drm_connector *connector) + { +- struct intel_encoder *output = to_intel_encoder(connector); +- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + +- if (sdvo_priv->is_tv) ++ if (IS_TV(sdvo_connector)) + intel_sdvo_get_tv_modes(connector); +- else if (sdvo_priv->is_lvds == true) ++ else if (IS_LVDS(sdvo_connector)) + intel_sdvo_get_lvds_modes(connector); + else + intel_sdvo_get_ddc_modes(connector); +@@ -1920,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) + static + void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; + struct drm_device *dev = connector->dev; + +- if (sdvo_priv->is_tv) { ++ if (IS_TV(sdvo_priv)) { + if (sdvo_priv->left_property) + drm_property_destroy(dev, sdvo_priv->left_property); + if (sdvo_priv->right_property) +@@ -1937,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) + drm_property_destroy(dev, sdvo_priv->hpos_property); + if (sdvo_priv->vpos_property) + drm_property_destroy(dev, sdvo_priv->vpos_property); +- } +- if (sdvo_priv->is_tv) { + if (sdvo_priv->saturation_property) + drm_property_destroy(dev, + sdvo_priv->saturation_property); +@@ -1948,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) + if (sdvo_priv->hue_property) + 
drm_property_destroy(dev, sdvo_priv->hue_property); + } +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { ++ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { + if (sdvo_priv->brightness_property) + drm_property_destroy(dev, + sdvo_priv->brightness_property); +@@ -1958,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) + + static void intel_sdvo_destroy(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; +- +- if (intel_encoder->i2c_bus) +- intel_i2c_destroy(intel_encoder->i2c_bus); +- if (intel_encoder->ddc_bus) +- intel_i2c_destroy(intel_encoder->ddc_bus); +- if (sdvo_priv->analog_ddc_bus) +- intel_i2c_destroy(sdvo_priv->analog_ddc_bus); +- +- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) +- drm_mode_destroy(connector->dev, +- sdvo_priv->sdvo_lvds_fixed_mode); ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + +- if (sdvo_priv->tv_format_property) ++ if (sdvo_connector->tv_format_property) + drm_property_destroy(connector->dev, +- sdvo_priv->tv_format_property); +- +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) +- intel_sdvo_destroy_enhance_property(connector); ++ sdvo_connector->tv_format_property); + ++ intel_sdvo_destroy_enhance_property(connector); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- +- kfree(intel_encoder); ++ kfree(connector); + } + + static int +@@ -1990,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; +- struct 
drm_encoder *encoder = &intel_encoder->enc; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct drm_crtc *crtc = encoder->crtc; + int ret = 0; + bool changed = false; +@@ -2003,101 +1882,101 @@ intel_sdvo_set_property(struct drm_connector *connector, + if (ret < 0) + goto out; + +- if (property == sdvo_priv->tv_format_property) { ++ if (property == sdvo_connector->tv_format_property) { + if (val >= TV_FORMAT_NUM) { + ret = -EINVAL; + goto out; + } + if (sdvo_priv->tv_format_name == +- sdvo_priv->tv_format_supported[val]) ++ sdvo_connector->tv_format_supported[val]) + goto out; + +- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; ++ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val]; + changed = true; + } + +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { ++ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) { + cmd = 0; + temp_value = val; +- if (sdvo_priv->left_property == property) { ++ if (sdvo_connector->left_property == property) { + drm_connector_property_set_value(connector, +- sdvo_priv->right_property, val); +- if (sdvo_priv->left_margin == temp_value) ++ sdvo_connector->right_property, val); ++ if (sdvo_connector->left_margin == temp_value) + goto out; + +- sdvo_priv->left_margin = temp_value; +- sdvo_priv->right_margin = temp_value; +- temp_value = sdvo_priv->max_hscan - +- sdvo_priv->left_margin; ++ sdvo_connector->left_margin = temp_value; ++ sdvo_connector->right_margin = temp_value; ++ temp_value = sdvo_connector->max_hscan - ++ sdvo_connector->left_margin; + cmd = SDVO_CMD_SET_OVERSCAN_H; +- } else if (sdvo_priv->right_property == property) { ++ } else if (sdvo_connector->right_property == property) { + drm_connector_property_set_value(connector, +- sdvo_priv->left_property, val); +- if (sdvo_priv->right_margin == temp_value) ++ sdvo_connector->left_property, val); ++ if (sdvo_connector->right_margin == 
temp_value) + goto out; + +- sdvo_priv->left_margin = temp_value; +- sdvo_priv->right_margin = temp_value; +- temp_value = sdvo_priv->max_hscan - +- sdvo_priv->left_margin; ++ sdvo_connector->left_margin = temp_value; ++ sdvo_connector->right_margin = temp_value; ++ temp_value = sdvo_connector->max_hscan - ++ sdvo_connector->left_margin; + cmd = SDVO_CMD_SET_OVERSCAN_H; +- } else if (sdvo_priv->top_property == property) { ++ } else if (sdvo_connector->top_property == property) { + drm_connector_property_set_value(connector, +- sdvo_priv->bottom_property, val); +- if (sdvo_priv->top_margin == temp_value) ++ sdvo_connector->bottom_property, val); ++ if (sdvo_connector->top_margin == temp_value) + goto out; + +- sdvo_priv->top_margin = temp_value; +- sdvo_priv->bottom_margin = temp_value; +- temp_value = sdvo_priv->max_vscan - +- sdvo_priv->top_margin; ++ sdvo_connector->top_margin = temp_value; ++ sdvo_connector->bottom_margin = temp_value; ++ temp_value = sdvo_connector->max_vscan - ++ sdvo_connector->top_margin; + cmd = SDVO_CMD_SET_OVERSCAN_V; +- } else if (sdvo_priv->bottom_property == property) { ++ } else if (sdvo_connector->bottom_property == property) { + drm_connector_property_set_value(connector, +- sdvo_priv->top_property, val); +- if (sdvo_priv->bottom_margin == temp_value) ++ sdvo_connector->top_property, val); ++ if (sdvo_connector->bottom_margin == temp_value) + goto out; +- sdvo_priv->top_margin = temp_value; +- sdvo_priv->bottom_margin = temp_value; +- temp_value = sdvo_priv->max_vscan - +- sdvo_priv->top_margin; ++ sdvo_connector->top_margin = temp_value; ++ sdvo_connector->bottom_margin = temp_value; ++ temp_value = sdvo_connector->max_vscan - ++ sdvo_connector->top_margin; + cmd = SDVO_CMD_SET_OVERSCAN_V; +- } else if (sdvo_priv->hpos_property == property) { +- if (sdvo_priv->cur_hpos == temp_value) ++ } else if (sdvo_connector->hpos_property == property) { ++ if (sdvo_connector->cur_hpos == temp_value) + goto out; + + cmd = 
SDVO_CMD_SET_POSITION_H; +- sdvo_priv->cur_hpos = temp_value; +- } else if (sdvo_priv->vpos_property == property) { +- if (sdvo_priv->cur_vpos == temp_value) ++ sdvo_connector->cur_hpos = temp_value; ++ } else if (sdvo_connector->vpos_property == property) { ++ if (sdvo_connector->cur_vpos == temp_value) + goto out; + + cmd = SDVO_CMD_SET_POSITION_V; +- sdvo_priv->cur_vpos = temp_value; +- } else if (sdvo_priv->saturation_property == property) { +- if (sdvo_priv->cur_saturation == temp_value) ++ sdvo_connector->cur_vpos = temp_value; ++ } else if (sdvo_connector->saturation_property == property) { ++ if (sdvo_connector->cur_saturation == temp_value) + goto out; + + cmd = SDVO_CMD_SET_SATURATION; +- sdvo_priv->cur_saturation = temp_value; +- } else if (sdvo_priv->contrast_property == property) { +- if (sdvo_priv->cur_contrast == temp_value) ++ sdvo_connector->cur_saturation = temp_value; ++ } else if (sdvo_connector->contrast_property == property) { ++ if (sdvo_connector->cur_contrast == temp_value) + goto out; + + cmd = SDVO_CMD_SET_CONTRAST; +- sdvo_priv->cur_contrast = temp_value; +- } else if (sdvo_priv->hue_property == property) { +- if (sdvo_priv->cur_hue == temp_value) ++ sdvo_connector->cur_contrast = temp_value; ++ } else if (sdvo_connector->hue_property == property) { ++ if (sdvo_connector->cur_hue == temp_value) + goto out; + + cmd = SDVO_CMD_SET_HUE; +- sdvo_priv->cur_hue = temp_value; +- } else if (sdvo_priv->brightness_property == property) { +- if (sdvo_priv->cur_brightness == temp_value) ++ sdvo_connector->cur_hue = temp_value; ++ } else if (sdvo_connector->brightness_property == property) { ++ if (sdvo_connector->cur_brightness == temp_value) + goto out; + + cmd = SDVO_CMD_SET_BRIGHTNESS; +- sdvo_priv->cur_brightness = temp_value; ++ sdvo_connector->cur_brightness = temp_value; + } + if (cmd) { + intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); +@@ -2127,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { + 
+ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_sdvo_save, +- .restore = intel_sdvo_restore, + .detect = intel_sdvo_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_sdvo_set_property, +@@ -2138,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { + static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { + .get_modes = intel_sdvo_get_modes, + .mode_valid = intel_sdvo_mode_valid, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ ++ if (intel_encoder->i2c_bus) ++ intel_i2c_destroy(intel_encoder->i2c_bus); ++ if (intel_encoder->ddc_bus) ++ intel_i2c_destroy(intel_encoder->ddc_bus); ++ if (sdvo_priv->analog_ddc_bus) ++ intel_i2c_destroy(sdvo_priv->analog_ddc_bus); ++ ++ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) ++ drm_mode_destroy(encoder->dev, ++ sdvo_priv->sdvo_lvds_fixed_mode); ++ + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { +@@ -2159,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { + * outputs, then LVDS outputs. + */ + static void +-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) ++intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, ++ struct intel_sdvo_priv *sdvo, u32 reg) + { +- uint16_t mask = 0; +- unsigned int num_bits; +- +- /* Make a mask of outputs less than or equal to our own priority in the +- * list. 
+- */ +- switch (dev_priv->controlled_output) { +- case SDVO_OUTPUT_LVDS1: +- mask |= SDVO_OUTPUT_LVDS1; +- case SDVO_OUTPUT_LVDS0: +- mask |= SDVO_OUTPUT_LVDS0; +- case SDVO_OUTPUT_TMDS1: +- mask |= SDVO_OUTPUT_TMDS1; +- case SDVO_OUTPUT_TMDS0: +- mask |= SDVO_OUTPUT_TMDS0; +- case SDVO_OUTPUT_RGB1: +- mask |= SDVO_OUTPUT_RGB1; +- case SDVO_OUTPUT_RGB0: +- mask |= SDVO_OUTPUT_RGB0; +- break; +- } ++ struct sdvo_device_mapping *mapping; + +- /* Count bits to find what number we are in the priority list. */ +- mask &= dev_priv->caps.output_flags; +- num_bits = hweight16(mask); +- if (num_bits > 3) { +- /* if more than 3 outputs, default to DDC bus 3 for now */ +- num_bits = 3; +- } ++ if (IS_SDVOB(reg)) ++ mapping = &(dev_priv->sdvo_mappings[0]); ++ else ++ mapping = &(dev_priv->sdvo_mappings[1]); + +- /* Corresponds to SDVO_CONTROL_BUS_DDCx */ +- dev_priv->ddc_bus = 1 << num_bits; ++ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); + } + + static bool +-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output) ++intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device) + { + struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + uint8_t status; + +- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); ++ if (device == 0) ++ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0); ++ else ++ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1); + + intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); + status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); +@@ -2214,15 +2086,13 @@ static struct intel_encoder * + intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) + { + struct drm_device *dev = chan->drm_dev; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + struct intel_encoder *intel_encoder = NULL; + +- list_for_each_entry(connector, +- &dev->mode_config.connector_list, head) { +- if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) { +- intel_encoder = 
to_intel_encoder(connector); ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ intel_encoder = enc_to_intel_encoder(encoder); ++ if (intel_encoder->ddc_bus == &chan->adapter) + break; +- } + } + return intel_encoder; + } +@@ -2259,7 +2129,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) + struct drm_i915_private *dev_priv = dev->dev_private; + struct sdvo_device_mapping *my_mapping, *other_mapping; + +- if (sdvo_reg == SDVOB) { ++ if (IS_SDVOB(sdvo_reg)) { + my_mapping = &dev_priv->sdvo_mappings[0]; + other_mapping = &dev_priv->sdvo_mappings[1]; + } else { +@@ -2284,120 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) + /* No SDVO device info is found for another DVO port, + * so use mapping assumption we had before BIOS parsing. + */ +- if (sdvo_reg == SDVOB) ++ if (IS_SDVOB(sdvo_reg)) + return 0x70; + else + return 0x72; + } + +-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id) ++static bool ++intel_sdvo_connector_alloc (struct intel_connector **ret) + { +- DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident); +- return 1; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ *ret = kzalloc(sizeof(*intel_connector) + ++ sizeof(*sdvo_connector), GFP_KERNEL); ++ if (!*ret) ++ return false; ++ ++ intel_connector = *ret; ++ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1); ++ intel_connector->dev_priv = sdvo_connector; ++ ++ return true; + } + +-static struct dmi_system_id intel_sdvo_bad_tv[] = { +- { +- .callback = intel_sdvo_bad_tv_callback, +- .ident = "IntelG45/ICH10R/DME1737", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"), +- DMI_MATCH(DMI_PRODUCT_NAME, "4800784"), +- }, +- }, ++static void ++intel_sdvo_connector_create (struct drm_encoder *encoder, ++ struct drm_connector *connector) ++{ ++ drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, ++ 
connector->connector_type); + +- { } /* terminating entry */ +-}; ++ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); ++ ++ connector->interlace_allowed = 0; ++ connector->doublescan_allowed = 0; ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ ++ drm_mode_connector_attach_encoder(connector, encoder); ++ drm_sysfs_connector_add(connector); ++} + + static bool +-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) ++intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) + { +- struct drm_connector *connector = &intel_encoder->base; + struct drm_encoder *encoder = &intel_encoder->enc; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; +- bool ret = true, registered = false; ++ struct drm_connector *connector; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ if (!intel_sdvo_connector_alloc(&intel_connector)) ++ return false; ++ ++ sdvo_connector = intel_connector->dev_priv; ++ ++ if (device == 0) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0; ++ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; ++ } else if (device == 1) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1; ++ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; ++ } ++ ++ connector = &intel_connector->base; ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; ++ encoder->encoder_type = DRM_MODE_ENCODER_TMDS; ++ connector->connector_type = DRM_MODE_CONNECTOR_DVID; ++ ++ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) ++ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device) ++ && sdvo_priv->is_hdmi) { ++ /* enable hdmi encoding mode if supported */ ++ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); ++ intel_sdvo_set_colorimetry(intel_encoder, ++ SDVO_COLORIMETRY_RGB256); ++ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; ++ } ++ intel_encoder->clone_mask = (1 << 
INTEL_SDVO_NON_TV_CLONE_BIT) | ++ (1 << INTEL_ANALOG_CLONE_BIT); ++ ++ intel_sdvo_connector_create(encoder, connector); ++ ++ return true; ++} ++ ++static bool ++intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type) ++{ ++ struct drm_encoder *encoder = &intel_encoder->enc; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct drm_connector *connector; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ if (!intel_sdvo_connector_alloc(&intel_connector)) ++ return false; ++ ++ connector = &intel_connector->base; ++ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; ++ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; ++ sdvo_connector = intel_connector->dev_priv; ++ ++ sdvo_priv->controlled_output |= type; ++ sdvo_connector->output_flag = type; ++ ++ sdvo_priv->is_tv = true; ++ intel_encoder->needs_tv_clock = true; ++ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; ++ ++ intel_sdvo_connector_create(encoder, connector); ++ ++ intel_sdvo_tv_create_property(connector, type); ++ ++ intel_sdvo_create_enhance_property(connector); ++ ++ return true; ++} ++ ++static bool ++intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) ++{ ++ struct drm_encoder *encoder = &intel_encoder->enc; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct drm_connector *connector; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ if (!intel_sdvo_connector_alloc(&intel_connector)) ++ return false; ++ ++ connector = &intel_connector->base; ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ encoder->encoder_type = DRM_MODE_ENCODER_DAC; ++ connector->connector_type = DRM_MODE_CONNECTOR_VGA; ++ sdvo_connector = intel_connector->dev_priv; ++ ++ if (device == 0) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0; ++ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; ++ } else if (device == 1) { ++ sdvo_priv->controlled_output 
|= SDVO_OUTPUT_RGB1; ++ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; ++ } ++ ++ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | ++ (1 << INTEL_ANALOG_CLONE_BIT); ++ ++ intel_sdvo_connector_create(encoder, connector); ++ return true; ++} ++ ++static bool ++intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device) ++{ ++ struct drm_encoder *encoder = &intel_encoder->enc; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct drm_connector *connector; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ if (!intel_sdvo_connector_alloc(&intel_connector)) ++ return false; ++ ++ connector = &intel_connector->base; ++ encoder->encoder_type = DRM_MODE_ENCODER_LVDS; ++ connector->connector_type = DRM_MODE_CONNECTOR_LVDS; ++ sdvo_connector = intel_connector->dev_priv; ++ ++ sdvo_priv->is_lvds = true; ++ ++ if (device == 0) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0; ++ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; ++ } else if (device == 1) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1; ++ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; ++ } ++ ++ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | ++ (1 << INTEL_SDVO_LVDS_CLONE_BIT); ++ ++ intel_sdvo_connector_create(encoder, connector); ++ intel_sdvo_create_enhance_property(connector); ++ return true; ++} ++ ++static bool ++intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) ++{ ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + + sdvo_priv->is_tv = false; + intel_encoder->needs_tv_clock = false; + sdvo_priv->is_lvds = false; + +- if (device_is_registered(&connector->kdev)) { +- drm_sysfs_connector_remove(connector); +- registered = true; +- } ++ /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + +- if (flags & +- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { +- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) +- 
sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; +- else +- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; +- +- encoder->encoder_type = DRM_MODE_ENCODER_TMDS; +- connector->connector_type = DRM_MODE_CONNECTOR_DVID; +- +- if (intel_sdvo_get_supp_encode(intel_encoder, +- &sdvo_priv->encode) && +- intel_sdvo_get_digital_encoding_mode(intel_encoder) && +- sdvo_priv->is_hdmi) { +- /* enable hdmi encoding mode if supported */ +- intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); +- intel_sdvo_set_colorimetry(intel_encoder, +- SDVO_COLORIMETRY_RGB256); +- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; +- intel_encoder->clone_mask = +- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | +- (1 << INTEL_ANALOG_CLONE_BIT); +- } +- } else if ((flags & SDVO_OUTPUT_SVID0) && +- !dmi_check_system(intel_sdvo_bad_tv)) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; +- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; +- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; +- sdvo_priv->is_tv = true; +- intel_encoder->needs_tv_clock = true; +- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; +- } else if (flags & SDVO_OUTPUT_RGB0) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; +- encoder->encoder_type = DRM_MODE_ENCODER_DAC; +- connector->connector_type = DRM_MODE_CONNECTOR_VGA; +- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | +- (1 << INTEL_ANALOG_CLONE_BIT); +- } else if (flags & SDVO_OUTPUT_RGB1) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; +- encoder->encoder_type = DRM_MODE_ENCODER_DAC; +- connector->connector_type = DRM_MODE_CONNECTOR_VGA; +- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | +- (1 << INTEL_ANALOG_CLONE_BIT); +- } else if (flags & SDVO_OUTPUT_CVBS0) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; +- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; +- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; +- sdvo_priv->is_tv = true; +- intel_encoder->needs_tv_clock = true; +- 
intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; +- } else if (flags & SDVO_OUTPUT_LVDS0) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; +- encoder->encoder_type = DRM_MODE_ENCODER_LVDS; +- connector->connector_type = DRM_MODE_CONNECTOR_LVDS; +- sdvo_priv->is_lvds = true; +- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | +- (1 << INTEL_SDVO_LVDS_CLONE_BIT); +- } else if (flags & SDVO_OUTPUT_LVDS1) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; +- encoder->encoder_type = DRM_MODE_ENCODER_LVDS; +- connector->connector_type = DRM_MODE_CONNECTOR_LVDS; +- sdvo_priv->is_lvds = true; +- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | +- (1 << INTEL_SDVO_LVDS_CLONE_BIT); +- } else { ++ if (flags & SDVO_OUTPUT_TMDS0) ++ if (!intel_sdvo_dvi_init(intel_encoder, 0)) ++ return false; ++ ++ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) ++ if (!intel_sdvo_dvi_init(intel_encoder, 1)) ++ return false; ++ ++ /* TV has no XXX1 function block */ ++ if (flags & SDVO_OUTPUT_SVID0) ++ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0)) ++ return false; ++ ++ if (flags & SDVO_OUTPUT_CVBS0) ++ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0)) ++ return false; ++ ++ if (flags & SDVO_OUTPUT_RGB0) ++ if (!intel_sdvo_analog_init(intel_encoder, 0)) ++ return false; ++ ++ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) ++ if (!intel_sdvo_analog_init(intel_encoder, 1)) ++ return false; ++ ++ if (flags & SDVO_OUTPUT_LVDS0) ++ if (!intel_sdvo_lvds_init(intel_encoder, 0)) ++ return false; + ++ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) ++ if (!intel_sdvo_lvds_init(intel_encoder, 1)) ++ return false; ++ ++ if ((flags & SDVO_OUTPUT_MASK) == 0) { + unsigned char bytes[2]; + + sdvo_priv->controlled_output = 0; +@@ -2405,28 +2392,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) + DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", + SDVO_NAME(sdvo_priv), + bytes[0], bytes[1]); +- ret = 
false; ++ return false; + } + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + +- if (ret && registered) +- ret = drm_sysfs_connector_add(connector) == 0 ? true : false; +- +- +- return ret; +- ++ return true; + } + +-static void intel_sdvo_tv_create_property(struct drm_connector *connector) ++static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct intel_sdvo_tv_format format; + uint32_t format_map, i; + uint8_t status; + +- intel_sdvo_set_target_output(intel_encoder, +- sdvo_priv->controlled_output); ++ intel_sdvo_set_target_output(intel_encoder, type); + + intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); +@@ -2441,35 +2425,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) + if (format_map == 0) + return; + +- sdvo_priv->format_supported_num = 0; ++ sdvo_connector->format_supported_num = 0; + for (i = 0 ; i < TV_FORMAT_NUM; i++) + if (format_map & (1 << i)) { +- sdvo_priv->tv_format_supported +- [sdvo_priv->format_supported_num++] = ++ sdvo_connector->tv_format_supported ++ [sdvo_connector->format_supported_num++] = + tv_format_names[i]; + } + + +- sdvo_priv->tv_format_property = ++ sdvo_connector->tv_format_property = + drm_property_create( + connector->dev, DRM_MODE_PROP_ENUM, +- "mode", sdvo_priv->format_supported_num); ++ "mode", sdvo_connector->format_supported_num); + +- for (i = 0; i < sdvo_priv->format_supported_num; i++) ++ for (i = 0; i < sdvo_connector->format_supported_num; i++) + drm_property_add_enum( +- 
sdvo_priv->tv_format_property, i, +- i, sdvo_priv->tv_format_supported[i]); ++ sdvo_connector->tv_format_property, i, ++ i, sdvo_connector->tv_format_supported[i]); + +- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0]; ++ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0]; + drm_connector_attach_property( +- connector, sdvo_priv->tv_format_property, 0); ++ connector, sdvo_connector->tv_format_property, 0); + + } + + static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; + struct intel_sdvo_enhancements_reply sdvo_data; + struct drm_device *dev = connector->dev; + uint8_t status; +@@ -2488,7 +2474,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + DRM_DEBUG_KMS("No enhancement is supported\n"); + return; + } +- if (sdvo_priv->is_tv) { ++ if (IS_TV(sdvo_priv)) { + /* when horizontal overscan is supported, Add the left/right + * property + */ +@@ -2636,8 +2622,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + "default %d, current %d\n", + data_value[0], data_value[1], response); + } +- } +- if (sdvo_priv->is_tv) { + if (sdvo_data.saturation) { + intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_SATURATION, NULL, 0); +@@ -2733,7 +2717,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + data_value[0], data_value[1], response); + } + } +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { ++ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { + if (sdvo_data.brightness) { + 
intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); +@@ -2773,12 +2757,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- struct drm_connector *connector; + struct intel_encoder *intel_encoder; + struct intel_sdvo_priv *sdvo_priv; +- + u8 ch[0x40]; + int i; ++ u32 i2c_reg, ddc_reg, analog_ddc_reg; + + intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); + if (!intel_encoder) { +@@ -2791,11 +2774,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + intel_encoder->dev_priv = sdvo_priv; + intel_encoder->type = INTEL_OUTPUT_SDVO; + ++ if (HAS_PCH_SPLIT(dev)) { ++ i2c_reg = PCH_GPIOE; ++ ddc_reg = PCH_GPIOE; ++ analog_ddc_reg = PCH_GPIOA; ++ } else { ++ i2c_reg = GPIOE; ++ ddc_reg = GPIOE; ++ analog_ddc_reg = GPIOA; ++ } ++ + /* setup the DDC bus. */ +- if (sdvo_reg == SDVOB) +- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); ++ if (IS_SDVOB(sdvo_reg)) ++ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB"); + else +- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); ++ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC"); + + if (!intel_encoder->i2c_bus) + goto err_inteloutput; +@@ -2809,20 +2802,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + for (i = 0; i < 0x40; i++) { + if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { + DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", +- sdvo_reg == SDVOB ? 'B' : 'C'); ++ IS_SDVOB(sdvo_reg) ? 'B' : 'C'); + goto err_i2c; + } + } + + /* setup the DDC bus. 
*/ +- if (sdvo_reg == SDVOB) { +- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); +- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, ++ if (IS_SDVOB(sdvo_reg)) { ++ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); ++ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, + "SDVOB/VGA DDC BUS"); + dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; + } else { +- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); +- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, ++ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); ++ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, + "SDVOC/VGA DDC BUS"); + dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; + } +@@ -2833,41 +2826,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + /* Wrap with our custom algo which switches to DDC mode */ + intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; + ++ /* encoder type will be decided later */ ++ drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0); ++ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); ++ + /* In default case sdvo lvds is false */ + intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); + + if (intel_sdvo_output_setup(intel_encoder, + sdvo_priv->caps.output_flags) != true) { + DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", +- sdvo_reg == SDVOB ? 'B' : 'C'); ++ IS_SDVOB(sdvo_reg) ? 
'B' : 'C'); + goto err_i2c; + } + +- +- connector = &intel_encoder->base; +- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, +- connector->connector_type); +- +- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); +- connector->interlace_allowed = 0; +- connector->doublescan_allowed = 0; +- connector->display_info.subpixel_order = SubPixelHorizontalRGB; +- +- drm_encoder_init(dev, &intel_encoder->enc, +- &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type); +- +- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); +- +- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); +- if (sdvo_priv->is_tv) +- intel_sdvo_tv_create_property(connector); +- +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) +- intel_sdvo_create_enhance_property(connector); +- +- drm_sysfs_connector_add(connector); +- +- intel_sdvo_select_ddc_bus(sdvo_priv); ++ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg); + + /* Set the input timing to the screen. Assume always input 0. 
*/ + intel_sdvo_set_target_input(intel_encoder, true, false); +diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c +index d7d39b2..6d553c2 100644 +--- a/drivers/gpu/drm/i915/intel_tv.c ++++ b/drivers/gpu/drm/i915/intel_tv.c +@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) + } + } + +-static void +-intel_tv_save(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; +- int i; +- +- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); +- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2); +- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3); +- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1); +- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2); +- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3); +- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4); +- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5); +- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6); +- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7); +- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1); +- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2); +- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3); +- +- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y); +- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2); +- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U); +- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2); +- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V); +- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2); +- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS); +- tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL); +- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS); +- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE); +- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1); +- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2); +- 
tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3); +- +- for (i = 0; i < 60; i++) +- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2)); +- for (i = 0; i < 60; i++) +- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2)); +- for (i = 0; i < 43; i++) +- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2)); +- for (i = 0; i < 43; i++) +- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2)); +- +- tv_priv->save_TV_DAC = I915_READ(TV_DAC); +- tv_priv->save_TV_CTL = I915_READ(TV_CTL); +-} +- +-static void +-intel_tv_restore(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; +- struct drm_crtc *crtc = connector->encoder->crtc; +- struct intel_crtc *intel_crtc; +- int i; +- +- /* FIXME: No CRTC? */ +- if (!crtc) +- return; +- +- intel_crtc = to_intel_crtc(crtc); +- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1); +- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2); +- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3); +- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1); +- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2); +- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3); +- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4); +- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5); +- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6); +- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7); +- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1); +- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2); +- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3); +- +- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y); +- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2); +- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U); +- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2); +- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V); +- 
I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2); +- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS); +- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL); +- +- { +- int pipeconf_reg = (intel_crtc->pipe == 0) ? +- PIPEACONF : PIPEBCONF; +- int dspcntr_reg = (intel_crtc->plane == 0) ? +- DSPACNTR : DSPBCNTR; +- int pipeconf = I915_READ(pipeconf_reg); +- int dspcntr = I915_READ(dspcntr_reg); +- int dspbase_reg = (intel_crtc->plane == 0) ? +- DSPAADDR : DSPBADDR; +- /* Pipe must be off here */ +- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); +- /* Flush the plane changes */ +- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); +- +- if (!IS_I9XX(dev)) { +- /* Wait for vblank for the disable to take effect */ +- intel_wait_for_vblank(dev); +- } +- +- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); +- /* Wait for vblank for the disable to take effect. */ +- intel_wait_for_vblank(dev); +- +- /* Filter ctl must be set before TV_WIN_SIZE */ +- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1); +- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2); +- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3); +- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS); +- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE); +- I915_WRITE(pipeconf_reg, pipeconf); +- I915_WRITE(dspcntr_reg, dspcntr); +- /* Flush the plane changes */ +- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); +- } +- +- for (i = 0; i < 60; i++) +- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]); +- for (i = 0; i < 60; i++) +- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]); +- for (i = 0; i < 43; i++) +- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]); +- for (i = 0; i < 43; i++) +- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]); +- +- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC); +- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL); +-} +- + static const struct tv_mode * + intel_tv_mode_lookup (char *tv_format) + { 
+@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder) + static enum drm_mode_status + intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + + /* Ensure TV refresh is close to desired refresh */ +@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder + */ + static void intel_tv_find_better_format(struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + int i; +@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector) + { + struct drm_crtc *crtc; + struct drm_display_mode mode; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; +- struct drm_encoder *encoder = &intel_encoder->enc; + int dpms_mode; + int type = tv_priv->type; + +@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector) + if (encoder->crtc && encoder->crtc->enabled) { + type = intel_tv_detect_type(encoder->crtc, intel_encoder); + } else { +- crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode); ++ crtc = intel_get_load_detect_pipe(intel_encoder, connector, ++ &mode, &dpms_mode); + if (crtc) { + type = intel_tv_detect_type(crtc, intel_encoder); +- 
intel_release_load_detect_pipe(intel_encoder, dpms_mode); ++ intel_release_load_detect_pipe(intel_encoder, connector, ++ dpms_mode); + } else + type = -1; + } +@@ -1525,7 +1392,8 @@ static void + intel_tv_chose_preferred_modes(struct drm_connector *connector, + struct drm_display_mode *mode_ptr) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + + if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) +@@ -1550,7 +1418,8 @@ static int + intel_tv_get_modes(struct drm_connector *connector) + { + struct drm_display_mode *mode_ptr; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + int j, count = 0; + u64 tmp; +@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector) + static void + intel_tv_destroy (struct drm_connector *connector) + { +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); +- + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- kfree(intel_encoder); ++ kfree(connector); + } + + +@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop + uint64_t val) + { + struct drm_device *dev = connector->dev; +- struct intel_encoder *intel_encoder = to_intel_encoder(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; +- struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_crtc *crtc = encoder->crtc; + int ret = 0; + bool changed = 
false; +@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { + + static const struct drm_connector_funcs intel_tv_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_tv_save, +- .restore = intel_tv_restore, + .detect = intel_tv_detect, + .destroy = intel_tv_destroy, + .set_property = intel_tv_set_property, +@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { + static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { + .mode_valid = intel_tv_mode_valid, + .get_modes = intel_tv_get_modes, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_tv_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_tv_enc_funcs = { +@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev) + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_connector *connector; + struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct intel_tv_priv *tv_priv; + u32 tv_dac_on, tv_dac_off, save_tv_dac; + char **tv_format_names; +@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev) + return; + } + +- connector = &intel_encoder->base; ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ ++ connector = &intel_connector->base; + + drm_connector_init(dev, connector, &intel_tv_connector_funcs, + DRM_MODE_CONNECTOR_SVIDEO); +@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev) + drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, + DRM_MODE_ENCODER_TVDAC); + +- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); ++ 
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); + tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); + intel_encoder->type = INTEL_OUTPUT_TVOUT; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); +diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile +index 453df3f..acd31ed 100644 +--- a/drivers/gpu/drm/nouveau/Makefile ++++ b/drivers/gpu/drm/nouveau/Makefile +@@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ + nv50_cursor.o nv50_display.o nv50_fbcon.o \ + nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ + nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ +- nv17_gpio.o nv50_gpio.o ++ nv17_gpio.o nv50_gpio.o \ ++ nv50_calc.o + + nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o + nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o +diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c +index e13f6af..d4bcca8 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c ++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c +@@ -34,7 +34,7 @@ + static struct nouveau_dsm_priv { + bool dsm_detected; + acpi_handle dhandle; +- acpi_handle dsm_handle; ++ acpi_handle rom_handle; + } nouveau_dsm_priv; + + static const char nouveau_dsm_muid[] = { +@@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero + static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) + { + if (id == VGA_SWITCHEROO_IGD) +- return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA); ++ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); + else +- return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED); ++ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); + } + + static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, +@@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum 
vga_switcheroo_client_id id, + if (id == VGA_SWITCHEROO_IGD) + return 0; + +- return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state); ++ return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); + } + + static int nouveau_dsm_init(void) +@@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev) + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; ++ + status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); + if (ACPI_FAILURE(status)) { + return false; + } + +- ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED, +- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); ++ ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED, ++ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); + if (ret < 0) + return false; + + nouveau_dsm_priv.dhandle = dhandle; +- nouveau_dsm_priv.dsm_handle = nvidia_handle; + return true; + } + +@@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void) + struct pci_dev *pdev = NULL; + int has_dsm = 0; + int vga_count = 0; ++ + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + +@@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void) + } + + if (vga_count == 2 && has_dsm) { +- acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer); ++ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); + printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", + acpi_method_name); + nouveau_dsm_priv.dsm_detected = true; +@@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void) + { + vga_switcheroo_unregister_handler(); + } ++ ++/* retrieve the ROM in 4k blocks */ ++static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, ++ int offset, int len) ++{ ++ acpi_status status; ++ union acpi_object rom_arg_elements[2], *obj; ++ struct acpi_object_list rom_arg; ++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; ++ ++ rom_arg.count = 2; ++ rom_arg.pointer = &rom_arg_elements[0]; ++ ++ 
rom_arg_elements[0].type = ACPI_TYPE_INTEGER; ++ rom_arg_elements[0].integer.value = offset; ++ ++ rom_arg_elements[1].type = ACPI_TYPE_INTEGER; ++ rom_arg_elements[1].integer.value = len; ++ ++ status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); ++ if (ACPI_FAILURE(status)) { ++ printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); ++ return -ENODEV; ++ } ++ obj = (union acpi_object *)buffer.pointer; ++ memcpy(bios+offset, obj->buffer.pointer, len); ++ kfree(buffer.pointer); ++ return len; ++} ++ ++bool nouveau_acpi_rom_supported(struct pci_dev *pdev) ++{ ++ acpi_status status; ++ acpi_handle dhandle, rom_handle; ++ ++ if (!nouveau_dsm_priv.dsm_detected) ++ return false; ++ ++ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); ++ if (!dhandle) ++ return false; ++ ++ status = acpi_get_handle(dhandle, "_ROM", &rom_handle); ++ if (ACPI_FAILURE(status)) ++ return false; ++ ++ nouveau_dsm_priv.rom_handle = rom_handle; ++ return true; ++} ++ ++int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) ++{ ++ return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c +index abc382a..fc924b6 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bios.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c +@@ -26,6 +26,7 @@ + #define NV_DEBUG_NOTRACE + #include "nouveau_drv.h" + #include "nouveau_hw.h" ++#include "nouveau_encoder.h" + + /* these defines are made up */ + #define NV_CIO_CRE_44_HEADA 0x0 +@@ -177,6 +178,25 @@ out: + pci_disable_rom(dev->pdev); + } + ++static void load_vbios_acpi(struct drm_device *dev, uint8_t *data) ++{ ++ int i; ++ int ret; ++ int size = 64 * 1024; ++ ++ if (!nouveau_acpi_rom_supported(dev->pdev)) ++ return; ++ ++ for (i = 0; i < (size / ROM_BIOS_PAGE); i++) { ++ ret = nouveau_acpi_get_bios_chunk(data, ++ (i * ROM_BIOS_PAGE), ++ ROM_BIOS_PAGE); ++ if (ret <= 0) ++ break; ++ } ++ return; ++} ++ + 
struct methods { + const char desc[8]; + void (*loadbios)(struct drm_device *, uint8_t *); +@@ -190,6 +210,7 @@ static struct methods nv04_methods[] = { + }; + + static struct methods nv50_methods[] = { ++ { "ACPI", load_vbios_acpi, true }, + { "PRAMIN", load_vbios_pramin, true }, + { "PROM", load_vbios_prom, false }, + { "PCIROM", load_vbios_pci, true }, +@@ -256,6 +277,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) + struct init_tbl_entry { + char *name; + uint8_t id; ++ /* Return: ++ * > 0: success, length of opcode ++ * 0: success, but abort further parsing of table (INIT_DONE etc) ++ * < 0: failure, table parsing will be aborted ++ */ + int (*handler)(struct nvbios *, uint16_t, struct init_exec *); + }; + +@@ -709,6 +735,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev) + return dcb_entry; + } + ++static int ++read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) ++{ ++ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; ++ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES; ++ int recordoffset = 0, rdofs = 1, wrofs = 0; ++ uint8_t port_type = 0; ++ ++ if (!i2ctable) ++ return -EINVAL; ++ ++ if (dcb_version >= 0x30) { ++ if (i2ctable[0] != dcb_version) /* necessary? 
*/ ++ NV_WARN(dev, ++ "DCB I2C table version mismatch (%02X vs %02X)\n", ++ i2ctable[0], dcb_version); ++ dcb_i2c_ver = i2ctable[0]; ++ headerlen = i2ctable[1]; ++ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) ++ i2c_entries = i2ctable[2]; ++ else ++ NV_WARN(dev, ++ "DCB I2C table has more entries than indexable " ++ "(%d entries, max %d)\n", i2ctable[2], ++ DCB_MAX_NUM_I2C_ENTRIES); ++ entry_len = i2ctable[3]; ++ /* [4] is i2c_default_indices, read in parse_dcb_table() */ ++ } ++ /* ++ * It's your own fault if you call this function on a DCB 1.1 BIOS -- ++ * the test below is for DCB 1.2 ++ */ ++ if (dcb_version < 0x14) { ++ recordoffset = 2; ++ rdofs = 0; ++ wrofs = 1; ++ } ++ ++ if (index == 0xf) ++ return 0; ++ if (index >= i2c_entries) { ++ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n", ++ index, i2ctable[2]); ++ return -ENOENT; ++ } ++ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { ++ NV_ERROR(dev, "DCB I2C entry invalid\n"); ++ return -EINVAL; ++ } ++ ++ if (dcb_i2c_ver >= 0x30) { ++ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; ++ ++ /* ++ * Fixup for chips using same address offset for read and ++ * write. 
++ */ ++ if (port_type == 4) /* seen on C51 */ ++ rdofs = wrofs = 1; ++ if (port_type >= 5) /* G80+ */ ++ rdofs = wrofs = 0; ++ } ++ ++ if (dcb_i2c_ver >= 0x40) { ++ if (port_type != 5 && port_type != 6) ++ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); ++ ++ i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]); ++ } ++ ++ i2c->port_type = port_type; ++ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; ++ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; ++ ++ return 0; ++} ++ + static struct nouveau_i2c_chan * + init_i2c_device_find(struct drm_device *dev, int i2c_index) + { +@@ -727,6 +830,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index) + } + if (i2c_index == 0x80) /* g80+ */ + i2c_index = dcb->i2c_default_indices & 0xf; ++ else ++ if (i2c_index == 0x81) ++ i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4; ++ ++ if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) { ++ NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index); ++ return NULL; ++ } ++ ++ /* Make sure i2c table entry has been parsed, it may not ++ * have been if this is a bus not referenced by a DCB encoder ++ */ ++ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table, ++ i2c_index, &dcb->i2c[i2c_index]); + + return nouveau_i2c_find(dev, i2c_index); + } +@@ -818,7 +935,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); +- return 0; ++ return -EINVAL; + } + + configval = ROM32(bios->data[offset + 11 + config * 4]); +@@ -920,7 +1037,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); +- return 0; ++ return -EINVAL; + } + + freq = ROM16(bios->data[offset + 12 + config * 2]); +@@ -1067,6 +1184,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset, + } + + static 
int ++init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ++{ ++ /* ++ * INIT_DP_CONDITION opcode: 0x3A ('') ++ * ++ * offset (8 bit): opcode ++ * offset + 1 (8 bit): "sub" opcode ++ * offset + 2 (8 bit): unknown ++ * ++ */ ++ ++ struct bit_displayport_encoder_table *dpe = NULL; ++ struct dcb_entry *dcb = bios->display.output; ++ struct drm_device *dev = bios->dev; ++ uint8_t cond = bios->data[offset + 1]; ++ int dummy; ++ ++ BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond); ++ ++ if (!iexec->execute) ++ return 3; ++ ++ dpe = nouveau_bios_dp_table(dev, dcb, &dummy); ++ if (!dpe) { ++ NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset); ++ return -EINVAL; ++ } ++ ++ switch (cond) { ++ case 0: ++ { ++ struct dcb_connector_table_entry *ent = ++ &bios->dcb.connector.entry[dcb->connector]; ++ ++ if (ent->type != DCB_CONNECTOR_eDP) ++ iexec->execute = false; ++ } ++ break; ++ case 1: ++ case 2: ++ if (!(dpe->unknown & cond)) ++ iexec->execute = false; ++ break; ++ case 5: ++ { ++ struct nouveau_i2c_chan *auxch; ++ int ret; ++ ++ auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index); ++ if (!auxch) ++ return -ENODEV; ++ ++ ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1); ++ if (ret) ++ return ret; ++ ++ if (cond & 1) ++ iexec->execute = false; ++ } ++ break; ++ default: ++ NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond); ++ break; ++ } ++ ++ if (iexec->execute) ++ BIOSLOG(bios, "0x%04X: continuing to execute\n", offset); ++ else ++ BIOSLOG(bios, "0x%04X: skipping following commands\n", offset); ++ ++ return 3; ++} ++ ++static int ++init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ++{ ++ /* ++ * INIT_3B opcode: 0x3B ('') ++ * ++ * offset (8 bit): opcode ++ * offset + 1 (8 bit): crtc index ++ * ++ */ ++ ++ uint8_t or = ffs(bios->display.output->or) - 1; ++ uint8_t index = bios->data[offset + 1]; ++ uint8_t data; ++ ++ if (!iexec->execute) ++ return 2; ++ ++ data = 
bios_idxprt_rd(bios, 0x3d4, index); ++ bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or)); ++ return 2; ++} ++ ++static int ++init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ++{ ++ /* ++ * INIT_3C opcode: 0x3C ('') ++ * ++ * offset (8 bit): opcode ++ * offset + 1 (8 bit): crtc index ++ * ++ */ ++ ++ uint8_t or = ffs(bios->display.output->or) - 1; ++ uint8_t index = bios->data[offset + 1]; ++ uint8_t data; ++ ++ if (!iexec->execute) ++ return 2; ++ ++ data = bios_idxprt_rd(bios, 0x3d4, index); ++ bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or)); ++ return 2; ++} ++ ++static int + init_idx_addr_latched(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) + { +@@ -1170,7 +1407,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); +- return 0; ++ return -EINVAL; + } + + freq = ROM32(bios->data[offset + 11 + config * 4]); +@@ -1231,12 +1468,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + */ + + uint8_t i2c_index = bios->data[offset + 1]; +- uint8_t i2c_address = bios->data[offset + 2]; ++ uint8_t i2c_address = bios->data[offset + 2] >> 1; + uint8_t count = bios->data[offset + 3]; +- int len = 4 + count * 3; + struct nouveau_i2c_chan *chan; +- struct i2c_msg msg; +- int i; ++ int len = 4 + count * 3; ++ int ret, i; + + if (!iexec->execute) + return len; +@@ -1247,35 +1483,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) +- return 0; ++ return -ENODEV; + + for (i = 0; i < count; i++) { +- uint8_t i2c_reg = bios->data[offset + 4 + i * 3]; ++ uint8_t reg = bios->data[offset + 4 + i * 3]; + uint8_t mask = bios->data[offset + 5 + i * 3]; + uint8_t data = bios->data[offset + 6 + i * 3]; +- uint8_t value; ++ union i2c_smbus_data val; + +- msg.addr = i2c_address; +- msg.flags = 
I2C_M_RD; +- msg.len = 1; +- msg.buf = &value; +- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return 0; ++ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, ++ I2C_SMBUS_READ, reg, ++ I2C_SMBUS_BYTE_DATA, &val); ++ if (ret < 0) ++ return ret; + + BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " + "Mask: 0x%02X, Data: 0x%02X\n", +- offset, i2c_reg, value, mask, data); ++ offset, reg, val.byte, mask, data); + +- value = (value & mask) | data; ++ if (!bios->execute) ++ continue; + +- if (bios->execute) { +- msg.addr = i2c_address; +- msg.flags = 0; +- msg.len = 1; +- msg.buf = &value; +- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return 0; +- } ++ val.byte &= mask; ++ val.byte |= data; ++ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, ++ I2C_SMBUS_WRITE, reg, ++ I2C_SMBUS_BYTE_DATA, &val); ++ if (ret < 0) ++ return ret; + } + + return len; +@@ -1301,12 +1536,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + */ + + uint8_t i2c_index = bios->data[offset + 1]; +- uint8_t i2c_address = bios->data[offset + 2]; ++ uint8_t i2c_address = bios->data[offset + 2] >> 1; + uint8_t count = bios->data[offset + 3]; +- int len = 4 + count * 2; + struct nouveau_i2c_chan *chan; +- struct i2c_msg msg; +- int i; ++ int len = 4 + count * 2; ++ int ret, i; + + if (!iexec->execute) + return len; +@@ -1317,23 +1551,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) +- return 0; ++ return -ENODEV; + + for (i = 0; i < count; i++) { +- uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; +- uint8_t data = bios->data[offset + 5 + i * 2]; ++ uint8_t reg = bios->data[offset + 4 + i * 2]; ++ union i2c_smbus_data val; ++ ++ val.byte = bios->data[offset + 5 + i * 2]; + + BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n", +- offset, i2c_reg, data); +- +- if (bios->execute) { +- msg.addr = i2c_address; +- msg.flags = 0; +- msg.len = 
1; +- msg.buf = &data; +- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return 0; +- } ++ offset, reg, val.byte); ++ ++ if (!bios->execute) ++ continue; ++ ++ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, ++ I2C_SMBUS_WRITE, reg, ++ I2C_SMBUS_BYTE_DATA, &val); ++ if (ret < 0) ++ return ret; + } + + return len; +@@ -1357,7 +1593,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + */ + + uint8_t i2c_index = bios->data[offset + 1]; +- uint8_t i2c_address = bios->data[offset + 2]; ++ uint8_t i2c_address = bios->data[offset + 2] >> 1; + uint8_t count = bios->data[offset + 3]; + int len = 4 + count; + struct nouveau_i2c_chan *chan; +@@ -1374,7 +1610,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) +- return 0; ++ return -ENODEV; + + for (i = 0; i < count; i++) { + data[i] = bios->data[offset + 4 + i]; +@@ -1388,7 +1624,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + msg.len = count; + msg.buf = data; + if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return 0; ++ return -EIO; + } + + return len; +@@ -1427,7 +1663,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + reg = get_tmds_index_reg(bios->dev, mlv); + if (!reg) +- return 0; ++ return -EINVAL; + + bios_wr32(bios, reg, + tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); +@@ -1471,7 +1707,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, + + reg = get_tmds_index_reg(bios->dev, mlv); + if (!reg) +- return 0; ++ return -EINVAL; + + for (i = 0; i < count; i++) { + uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; +@@ -1946,7 +2182,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, + uint32_t reg, data; + + if (bios->major_version > 2) +- return 0; ++ return -ENODEV; + + bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( + bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 
0x20); +@@ -2001,7 +2237,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset, + int clock; + + if (bios->major_version > 2) +- return 0; ++ return -ENODEV; + + clock = ROM16(bios->data[meminitoffs + 4]) * 10; + setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); +@@ -2034,7 +2270,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset, + uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); + + if (bios->major_version > 2) +- return 0; ++ return -ENODEV; + + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, + NV_CIO_CRE_SCRATCH4__INDEX, cr3c); +@@ -2591,7 +2827,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); + +- nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); ++ BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", ++ offset, gpio->tag, gpio->state_default); ++ if (bios->execute) ++ nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); + + /* The NVIDIA binary driver doesn't appear to actually do + * any of this, my VBIOS does however. 
+@@ -2656,7 +2895,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, + NV_ERROR(bios->dev, + "0x%04X: Zero block length - has the M table " + "been parsed?\n", offset); +- return 0; ++ return -EINVAL; + } + + strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; +@@ -2840,14 +3079,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + if (!bios->display.output) { + NV_ERROR(dev, "INIT_AUXCH: no active output\n"); +- return 0; ++ return -EINVAL; + } + + auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); + if (!auxch) { + NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", + bios->display.output->i2c_index); +- return 0; ++ return -ENODEV; + } + + if (!iexec->execute) +@@ -2860,7 +3099,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); + if (ret) { + NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); +- return 0; ++ return ret; + } + + data &= bios->data[offset + 0]; +@@ -2869,7 +3108,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); + if (ret) { + NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); +- return 0; ++ return ret; + } + } + +@@ -2899,14 +3138,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + if (!bios->display.output) { + NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); +- return 0; ++ return -EINVAL; + } + + auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); + if (!auxch) { + NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", + bios->display.output->i2c_index); +- return 0; ++ return -ENODEV; + } + + if (!iexec->execute) +@@ -2917,7 +3156,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); + if (ret) { + NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", 
ret); +- return 0; ++ return ret; + } + } + +@@ -2934,6 +3173,9 @@ static struct init_tbl_entry itbl_entry[] = { + { "INIT_COPY" , 0x37, init_copy }, + { "INIT_NOT" , 0x38, init_not }, + { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition }, ++ { "INIT_DP_CONDITION" , 0x3A, init_dp_condition }, ++ { "INIT_OP_3B" , 0x3B, init_op_3b }, ++ { "INIT_OP_3C" , 0x3C, init_op_3c }, + { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched }, + { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 }, + { "INIT_PLL2" , 0x4B, init_pll2 }, +@@ -3001,7 +3243,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset, + * is changed back to EXECUTE. + */ + +- int count = 0, i, res; ++ int count = 0, i, ret; + uint8_t id; + + /* +@@ -3016,26 +3258,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset, + for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++) + ; + +- if (itbl_entry[i].name) { +- BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", +- offset, itbl_entry[i].id, itbl_entry[i].name); +- +- /* execute eventual command handler */ +- res = (*itbl_entry[i].handler)(bios, offset, iexec); +- if (!res) +- break; +- /* +- * Add the offset of the current command including all data +- * of that command. The offset will then be pointing on the +- * next op code. +- */ +- offset += res; +- } else { ++ if (!itbl_entry[i].name) { + NV_ERROR(bios->dev, + "0x%04X: Init table command not found: " + "0x%02X\n", offset, id); + return -ENOENT; + } ++ ++ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset, ++ itbl_entry[i].id, itbl_entry[i].name); ++ ++ /* execute eventual command handler */ ++ ret = (*itbl_entry[i].handler)(bios, offset, iexec); ++ if (ret < 0) { ++ NV_ERROR(bios->dev, "0x%04X: Failed parsing init " ++ "table opcode: %s %d\n", offset, ++ itbl_entry[i].name, ret); ++ } ++ ++ if (ret <= 0) ++ break; ++ ++ /* ++ * Add the offset of the current command including all data ++ * of that command. 
The offset will then be pointing on the ++ * next op code. ++ */ ++ offset += ret; + } + + if (offset >= bios->length) +@@ -3671,7 +3920,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b + + static uint8_t * + bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, +- uint16_t record, int record_len, int record_nr) ++ uint16_t record, int record_len, int record_nr, ++ bool match_link) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; +@@ -3679,12 +3929,28 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, + uint16_t table; + int i, v; + ++ switch (dcbent->type) { ++ case OUTPUT_TMDS: ++ case OUTPUT_LVDS: ++ case OUTPUT_DP: ++ break; ++ default: ++ match_link = false; ++ break; ++ } ++ + for (i = 0; i < record_nr; i++, record += record_len) { + table = ROM16(bios->data[record]); + if (!table) + continue; + entry = ROM32(bios->data[table]); + ++ if (match_link) { ++ v = (entry & 0x00c00000) >> 22; ++ if (!(v & dcbent->sorconf.link)) ++ continue; ++ } ++ + v = (entry & 0x000f0000) >> 16; + if (!(v & dcbent->or)) + continue; +@@ -3726,7 +3992,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, + *length = table[4]; + return bios_output_config_match(dev, dcbent, + bios->display.dp_table_ptr + table[1], +- table[2], table[3]); ++ table[2], table[3], table[0] >= 0x21); + } + + int +@@ -3815,7 +4081,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, + dcbent->type, dcbent->location, dcbent->or); + otable = bios_output_config_match(dev, dcbent, table[1] + + bios->display.script_table_ptr, +- table[2], table[3]); ++ table[2], table[3], table[0] >= 0x21); + if (!otable) { + NV_ERROR(dev, "Couldn't find matching output script table\n"); + return 1; +@@ -4285,31 +4551,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims + break; + } + +-#if 0 /* for 
easy debugging */ +- ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); +- ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); +- ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); +- ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); +- +- ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); +- ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); +- ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); +- ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); +- +- ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); +- ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); +- ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); +- ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); +- ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); +- ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); +- ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); +- ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); +- +- ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p); +- ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias); +- +- ErrorF("pll.refclk: %d\n", pll_lim->refclk); +-#endif ++ NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); ++ NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); ++ NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); ++ NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); ++ NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); ++ NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); ++ NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); ++ NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); ++ if (pll_lim->vco2.maxfreq) { ++ NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); ++ NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); ++ NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); ++ NV_DEBUG(dev, 
"pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); ++ NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); ++ NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); ++ NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); ++ NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); ++ } ++ if (!pll_lim->max_p) { ++ NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p); ++ NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias); ++ } else { ++ NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p); ++ NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p); ++ } ++ NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk); + + return 0; + } +@@ -4953,79 +5220,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) + return 0; + } + +-static int +-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) +-{ +- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; +- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES; +- int recordoffset = 0, rdofs = 1, wrofs = 0; +- uint8_t port_type = 0; +- +- if (!i2ctable) +- return -EINVAL; +- +- if (dcb_version >= 0x30) { +- if (i2ctable[0] != dcb_version) /* necessary? 
*/ +- NV_WARN(dev, +- "DCB I2C table version mismatch (%02X vs %02X)\n", +- i2ctable[0], dcb_version); +- dcb_i2c_ver = i2ctable[0]; +- headerlen = i2ctable[1]; +- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) +- i2c_entries = i2ctable[2]; +- else +- NV_WARN(dev, +- "DCB I2C table has more entries than indexable " +- "(%d entries, max %d)\n", i2ctable[2], +- DCB_MAX_NUM_I2C_ENTRIES); +- entry_len = i2ctable[3]; +- /* [4] is i2c_default_indices, read in parse_dcb_table() */ +- } +- /* +- * It's your own fault if you call this function on a DCB 1.1 BIOS -- +- * the test below is for DCB 1.2 +- */ +- if (dcb_version < 0x14) { +- recordoffset = 2; +- rdofs = 0; +- wrofs = 1; +- } +- +- if (index == 0xf) +- return 0; +- if (index >= i2c_entries) { +- NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n", +- index, i2ctable[2]); +- return -ENOENT; +- } +- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { +- NV_ERROR(dev, "DCB I2C entry invalid\n"); +- return -EINVAL; +- } +- +- if (dcb_i2c_ver >= 0x30) { +- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; +- +- /* +- * Fixup for chips using same address offset for read and +- * write. 
+- */ +- if (port_type == 4) /* seen on C51 */ +- rdofs = wrofs = 1; +- if (port_type >= 5) /* G80+ */ +- rdofs = wrofs = 0; +- } +- +- if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6) +- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); +- +- i2c->port_type = port_type; +- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; +- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; +- +- return 0; +-} +- + static struct dcb_gpio_entry * + new_gpio_entry(struct nvbios *bios) + { +@@ -5379,12 +5573,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, + entry->bus = (conn >> 16) & 0xf; + entry->location = (conn >> 20) & 0x3; + entry->or = (conn >> 24) & 0xf; +- /* +- * Normal entries consist of a single bit, but dual link has the +- * next most significant bit set too +- */ +- entry->duallink_possible = +- ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); + + switch (entry->type) { + case OUTPUT_ANALOG: +@@ -5468,6 +5656,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, + break; + } + ++ if (dcb->version < 0x40) { ++ /* Normal entries consist of a single bit, but dual link has ++ * the next most significant bit set too ++ */ ++ entry->duallink_possible = ++ ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); ++ } else { ++ entry->duallink_possible = (entry->sorconf.link == 3); ++ } ++ + /* unsure what DCB version introduces this, 3.0? 
*/ + if (conf & 0x100000) + entry->i2c_upper_default = true; +@@ -6051,6 +6249,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev) + nouveau_i2c_fini(dev, entry); + } + ++static bool ++nouveau_bios_posted(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ bool was_locked; ++ unsigned htotal; ++ ++ if (dev_priv->chipset >= NV_50) { ++ if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && ++ NVReadVgaCrtc(dev, 0, 0x1a) == 0) ++ return false; ++ return true; ++ } ++ ++ was_locked = NVLockVgaCrtcs(dev, false); ++ htotal = NVReadVgaCrtc(dev, 0, 0x06); ++ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; ++ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; ++ htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; ++ htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; ++ NVLockVgaCrtcs(dev, was_locked); ++ return (htotal != 0); ++} ++ + int + nouveau_bios_init(struct drm_device *dev) + { +@@ -6085,11 +6307,9 @@ nouveau_bios_init(struct drm_device *dev) + bios->execute = false; + + /* ... 
unless card isn't POSTed already */ +- if (dev_priv->card_type >= NV_10 && +- NVReadVgaCrtc(dev, 0, 0x00) == 0 && +- NVReadVgaCrtc(dev, 0, 0x1a) == 0) { ++ if (!nouveau_bios_posted(dev)) { + NV_INFO(dev, "Adaptor not initialised\n"); +- if (dev_priv->card_type < NV_50) { ++ if (dev_priv->card_type < NV_40) { + NV_ERROR(dev, "Unable to POST this chipset\n"); + return -ENODEV; + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h +index c0d7b0a..adf4ec2 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bios.h ++++ b/drivers/gpu/drm/nouveau/nouveau_bios.h +@@ -35,6 +35,7 @@ + #define DCB_LOC_ON_CHIP 0 + + struct dcb_i2c_entry { ++ uint32_t entry; + uint8_t port_type; + uint8_t read, write; + struct nouveau_i2c_chan *chan; +diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c +index 957d176..6f3c195 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bo.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c +@@ -160,11 +160,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, + ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, + ttm_bo_type_device, &nvbo->placement, align, 0, + false, NULL, size, nouveau_bo_del_ttm); +- nvbo->channel = NULL; + if (ret) { + /* ttm will call nouveau_bo_del_ttm if it fails.. 
*/ + return ret; + } ++ nvbo->channel = NULL; + + spin_lock(&dev_priv->ttm.bo_list_lock); + list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list); +@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) + + nouveau_bo_placement_set(nvbo, memtype, 0); + +- ret = ttm_bo_validate(bo, &nvbo->placement, false, false); ++ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); + if (ret == 0) { + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: +@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) + + nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); + +- ret = ttm_bo_validate(bo, &nvbo->placement, false, false); ++ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); + if (ret == 0) { + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: +@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + break; + case TTM_PL_VRAM: + man->flags = TTM_MEMTYPE_FLAG_FIXED | +- TTM_MEMTYPE_FLAG_MAPPABLE | +- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; ++ TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; +- +- man->io_addr = NULL; +- man->io_offset = drm_get_resource_start(dev, 1); +- man->io_size = drm_get_resource_len(dev, 1); +- if (man->io_size > dev_priv->vram_size) +- man->io_size = dev_priv->vram_size; +- + man->gpu_offset = dev_priv->vm_vram_base; + break; + case TTM_PL_TT: + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: +- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | +- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; ++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED; + man->default_caching = TTM_PL_FLAG_UNCACHED; + break; +@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + dev_priv->gart_info.type); + return -EINVAL; + } +- +- man->io_offset = dev_priv->gart_info.aper_base; +- man->io_size = dev_priv->gart_info.aper_size; +- man->io_addr = NULL; 
+ man->gpu_offset = dev_priv->vm_gart_base; + break; + default: +@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) + + static int + nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, +- struct nouveau_bo *nvbo, bool evict, bool no_wait, ++ struct nouveau_bo *nvbo, bool evict, ++ bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) + { + struct nouveau_fence *fence = NULL; +@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, + return ret; + + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, +- evict, no_wait, new_mem); ++ evict, no_wait_reserve, no_wait_gpu, new_mem); + if (nvbo->channel && nvbo->channel != chan) + ret = nouveau_fence_wait(fence, NULL, false, false); + nouveau_fence_unref((void *)&fence); +@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, + + static int + nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, +- int no_wait, struct ttm_mem_reg *new_mem) ++ bool no_wait_reserve, bool no_wait_gpu, ++ struct ttm_mem_reg *new_mem) + { + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); +@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, + dst_offset += (PAGE_SIZE * line_count); + } + +- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem); ++ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); + } + + static int + nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, +- bool no_wait, struct ttm_mem_reg *new_mem) ++ bool no_wait_reserve, bool no_wait_gpu, ++ struct ttm_mem_reg *new_mem) + { + u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_placement placement; +@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, + + tmp_mem = 
*new_mem; + tmp_mem.mm_node = NULL; +- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); ++ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); + if (ret) + return ret; + +@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, + if (ret) + goto out; + +- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem); ++ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); + if (ret) + goto out; + +- ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem); ++ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + out: + if (tmp_mem.mm_node) { + spin_lock(&bo->bdev->glob->lru_lock); +@@ -618,7 +608,8 @@ out: + + static int + nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, +- bool no_wait, struct ttm_mem_reg *new_mem) ++ bool no_wait_reserve, bool no_wait_gpu, ++ struct ttm_mem_reg *new_mem) + { + u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_placement placement; +@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, + + tmp_mem = *new_mem; + tmp_mem.mm_node = NULL; +- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); ++ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); + if (ret) + return ret; + +- ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem); ++ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); + if (ret) + goto out; + +- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); ++ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); + if (ret) + goto out; + +@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, + + static int + nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, +- bool no_wait, struct ttm_mem_reg *new_mem) ++ bool no_wait_reserve, bool no_wait_gpu, ++ struct 
ttm_mem_reg *new_mem) + { + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); +@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, + /* Software copy if the card isn't up and running yet. */ + if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || + !dev_priv->channel) { +- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + goto out; + } + +@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, + + /* Hardware assisted copy. */ + if (new_mem->mem_type == TTM_PL_SYSTEM) +- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem); ++ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); + else if (old_mem->mem_type == TTM_PL_SYSTEM) +- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem); ++ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); + else +- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); ++ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); + + if (!ret) + goto out; + + /* Fallback to software copy. 
*/ +- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + + out: + if (ret) +@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) + return 0; + } + ++static int ++nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ++{ ++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; ++ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); ++ struct drm_device *dev = dev_priv->dev; ++ ++ mem->bus.addr = NULL; ++ mem->bus.offset = 0; ++ mem->bus.size = mem->num_pages << PAGE_SHIFT; ++ mem->bus.base = 0; ++ mem->bus.is_iomem = false; ++ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) ++ return -EINVAL; ++ switch (mem->mem_type) { ++ case TTM_PL_SYSTEM: ++ /* System memory */ ++ return 0; ++ case TTM_PL_TT: ++#if __OS_HAS_AGP ++ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { ++ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; ++ mem->bus.base = dev_priv->gart_info.aper_base; ++ mem->bus.is_iomem = true; ++ } ++#endif ++ break; ++ case TTM_PL_VRAM: ++ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; ++ mem->bus.base = drm_get_resource_start(dev, 1); ++ mem->bus.is_iomem = true; ++ break; ++ default: ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static void ++nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ++{ ++} ++ ++static int ++nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) ++{ ++ return 0; ++} ++ + struct ttm_bo_driver nouveau_bo_driver = { + .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, + .invalidate_caches = nouveau_bo_invalidate_caches, +@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = { + .sync_obj_flush = nouveau_fence_flush, + .sync_obj_unref = nouveau_fence_unref, + .sync_obj_ref = nouveau_fence_ref, ++ .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, ++ .io_mem_reserve = &nouveau_ttm_io_mem_reserve, ++ 
.io_mem_free = &nouveau_ttm_io_mem_free, + }; + +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index 14afe1e..149ed22 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector) + if (nv_encoder && nv_connector->native_mode) { + unsigned status = connector_status_connected; + +-#ifdef CONFIG_ACPI ++#if defined(CONFIG_ACPI_BUTTON) || \ ++ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) + if (!nouveau_ignorelid && !acpi_lid_open()) + status = connector_status_unknown; + #endif +@@ -431,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector, + } + + static struct drm_display_mode * +-nouveau_connector_native_mode(struct nouveau_connector *connector) ++nouveau_connector_native_mode(struct drm_connector *connector) + { +- struct drm_device *dev = connector->base.dev; ++ struct drm_connector_helper_funcs *helper = connector->helper_private; ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); ++ struct drm_device *dev = connector->dev; + struct drm_display_mode *mode, *largest = NULL; + int high_w = 0, high_h = 0, high_v = 0; + +- /* Use preferred mode if there is one.. */ +- list_for_each_entry(mode, &connector->base.probed_modes, head) { ++ list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { ++ if (helper->mode_valid(connector, mode) != MODE_OK) ++ continue; ++ ++ /* Use preferred mode if there is one.. 
*/ + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + NV_DEBUG_KMS(dev, "native mode from preferred\n"); + return drm_mode_duplicate(dev, mode); + } +- } + +- /* Otherwise, take the resolution with the largest width, then height, +- * then vertical refresh +- */ +- list_for_each_entry(mode, &connector->base.probed_modes, head) { ++ /* Otherwise, take the resolution with the largest width, then ++ * height, then vertical refresh ++ */ + if (mode->hdisplay < high_w) + continue; + +@@ -552,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) + */ + if (!nv_connector->native_mode) + nv_connector->native_mode = +- nouveau_connector_native_mode(nv_connector); ++ nouveau_connector_native_mode(connector); + if (ret == 0 && nv_connector->native_mode) { + struct drm_display_mode *mode; + +@@ -583,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, + + switch (nv_encoder->dcb->type) { + case OUTPUT_LVDS: +- BUG_ON(!nv_connector->native_mode); +- if (mode->hdisplay > nv_connector->native_mode->hdisplay || +- mode->vdisplay > nv_connector->native_mode->vdisplay) ++ if (nv_connector->native_mode && ++ (mode->hdisplay > nv_connector->native_mode->hdisplay || ++ mode->vdisplay > nv_connector->native_mode->vdisplay)) + return MODE_PANEL; + + min_clock = 0; +@@ -593,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, + break; + case OUTPUT_TMDS: + if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || +- (dev_priv->card_type < NV_50 && +- !nv_encoder->dcb->duallink_possible)) ++ !nv_encoder->dcb->duallink_possible) + max_clock = 165000; + else + max_clock = 330000; +@@ -728,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev, + if (ret == 0) + goto out; + nv_connector->detected_encoder = nv_encoder; +- nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); ++ nv_connector->native_mode = nouveau_connector_native_mode(connector); + list_for_each_entry_safe(mode, temp, 
&connector->probed_modes, head) + drm_mode_remove(connector, mode); + +@@ -843,6 +846,7 @@ nouveau_connector_create(struct drm_device *dev, + + switch (dcb->type) { + case DCB_CONNECTOR_VGA: ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (dev_priv->card_type >= NV_50) { + drm_connector_attach_property(connector, + dev->mode_config.scaling_mode_property, +@@ -854,6 +858,17 @@ nouveau_connector_create(struct drm_device *dev, + case DCB_CONNECTOR_TV_3: + nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; + break; ++ case DCB_CONNECTOR_DP: ++ case DCB_CONNECTOR_eDP: ++ case DCB_CONNECTOR_HDMI_0: ++ case DCB_CONNECTOR_HDMI_1: ++ case DCB_CONNECTOR_DVI_I: ++ case DCB_CONNECTOR_DVI_D: ++ if (dev_priv->card_type >= NV_50) ++ connector->polled = DRM_CONNECTOR_POLL_HPD; ++ else ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ /* fall-through */ + default: + nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; + +diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h +index 49fa7b2..cb1ce2a 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h ++++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h +@@ -40,6 +40,8 @@ struct nouveau_crtc { + int sharpness; + int last_dpms; + ++ int cursor_saved_x, cursor_saved_y; ++ + struct { + int cpp; + bool blanked; +diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c +index a251886..7933de4 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c ++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c +@@ -33,6 +33,8 @@ + #include "drmP.h" + #include "nouveau_drv.h" + ++#include ++ + static int + nouveau_debugfs_channel_info(struct seq_file *m, void *data) + { +@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = { + { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, + { "memory", nouveau_debugfs_memory_info, 0, NULL }, + { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, ++ { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL }, + 
}; + #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c +index cf1c5c0..74e6b4e 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_display.c ++++ b/drivers/gpu/drm/nouveau/nouveau_display.c +@@ -34,10 +34,6 @@ static void + nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) + { + struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); +- struct drm_device *dev = drm_fb->dev; +- +- if (drm_fb->fbdev) +- nouveau_fbcon_remove(dev, drm_fb); + + if (fb->nvbo) + drm_gem_object_unreference_unlocked(fb->nvbo->gem); +@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { + .create_handle = nouveau_user_framebuffer_create_handle, + }; + +-struct drm_framebuffer * +-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo, +- struct drm_mode_fb_cmd *mode_cmd) ++int ++nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb, ++ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo) + { +- struct nouveau_framebuffer *fb; + int ret; + +- fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); +- if (!fb) +- return NULL; +- +- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs); ++ ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs); + if (ret) { +- kfree(fb); +- return NULL; ++ return ret; + } + +- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); +- +- fb->nvbo = nvbo; +- return &fb->base; ++ drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd); ++ nouveau_fb->nvbo = nvbo; ++ return 0; + } + + static struct drm_framebuffer * +@@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_mode_fb_cmd *mode_cmd) + { +- struct drm_framebuffer *fb; ++ struct nouveau_framebuffer *nouveau_fb; + struct drm_gem_object *gem; ++ int ret; + + gem = 
drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); + if (!gem) + return NULL; + +- fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd); +- if (!fb) { ++ nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); ++ if (!nouveau_fb) ++ return NULL; ++ ++ ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); ++ if (ret) { + drm_gem_object_unreference(gem); + return NULL; + } + +- return fb; ++ return &nouveau_fb->base; + } + + const struct drm_mode_config_funcs nouveau_mode_config_funcs = { + .fb_create = nouveau_user_framebuffer_create, +- .fb_changed = nouveau_fbcon_probe, ++ .output_poll_changed = nouveau_fbcon_output_poll_changed, + }; + +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c +index 1de974a..2737704 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c +@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_channel *chan; + struct drm_crtc *crtc; +- uint32_t fbdev_flags; + int ret, i; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) +@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) + return 0; + + NV_INFO(dev, "Disabling fbcon acceleration...\n"); +- fbdev_flags = dev_priv->fbdev_info->flags; +- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; ++ nouveau_fbcon_save_disable_accel(dev); + + NV_INFO(dev, "Unpinning framebuffer(s)...\n"); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +@@ -177,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) + nouveau_bo_unpin(nouveau_fb->nvbo); + } + ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); ++ ++ nouveau_bo_unmap(nv_crtc->cursor.nvbo); ++ nouveau_bo_unpin(nv_crtc->cursor.nvbo); ++ } ++ + 
NV_INFO(dev, "Evicting buffers...\n"); + ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); + +@@ -230,9 +235,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) + } + + acquire_console_sem(); +- fb_set_suspend(dev_priv->fbdev_info, 1); ++ nouveau_fbcon_set_suspend(dev, 1); + release_console_sem(); +- dev_priv->fbdev_info->flags = fbdev_flags; ++ nouveau_fbcon_restore_accel(dev); + return 0; + + out_abort: +@@ -250,14 +255,12 @@ nouveau_pci_resume(struct pci_dev *pdev) + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + struct drm_crtc *crtc; +- uint32_t fbdev_flags; + int ret, i; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + +- fbdev_flags = dev_priv->fbdev_info->flags; +- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; ++ nouveau_fbcon_save_disable_accel(dev); + + NV_INFO(dev, "We're back, enabling device...\n"); + pci_set_power_state(pdev, PCI_D0); +@@ -318,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev) + nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); + } + ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); ++ int ret; ++ ++ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); ++ if (!ret) ++ ret = nouveau_bo_map(nv_crtc->cursor.nvbo); ++ if (ret) ++ NV_ERROR(dev, "Could not pin/map cursor.\n"); ++ } ++ + if (dev_priv->card_type < NV_50) { + nv04_display_restore(dev); + NVLockVgaCrtcs(dev, false); + } else + nv50_display_init(dev); + ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); ++ ++ nv_crtc->cursor.set_offset(nv_crtc, ++ nv_crtc->cursor.nvbo->bo.offset - ++ dev_priv->vm_vram_base); ++ ++ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, ++ nv_crtc->cursor_saved_y); ++ } ++ + /* Force CLUT to get re-loaded during modeset */ + list_for_each_entry(crtc, 
&dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); +@@ -332,13 +357,14 @@ nouveau_pci_resume(struct pci_dev *pdev) + } + + acquire_console_sem(); +- fb_set_suspend(dev_priv->fbdev_info, 0); ++ nouveau_fbcon_set_suspend(dev, 0); + release_console_sem(); + +- nouveau_fbcon_zfill(dev); ++ nouveau_fbcon_zfill_all(dev); + + drm_helper_resume_force_mode(dev); +- dev_priv->fbdev_info->flags = fbdev_flags; ++ ++ nouveau_fbcon_restore_accel(dev); + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h +index ace630a..c697191 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.h ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h +@@ -535,6 +535,7 @@ struct drm_nouveau_private { + + struct fb_info *fbdev_info; + ++ int fifo_alloc_count; + struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; + + struct nouveau_engine engine; +@@ -621,6 +622,9 @@ struct drm_nouveau_private { + struct { + struct dentry *channel_root; + } debugfs; ++ ++ struct nouveau_fbdev *nfbdev; ++ struct apertures_struct *apertures; + }; + + static inline struct drm_nouveau_private * +@@ -847,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *); + extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); + + /* nouveau_acpi.c */ ++#define ROM_BIOS_PAGE 4096 + #if defined(CONFIG_ACPI) + void nouveau_register_dsm_handler(void); + void nouveau_unregister_dsm_handler(void); ++int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); ++bool nouveau_acpi_rom_supported(struct pci_dev *pdev); + #else + static inline void nouveau_register_dsm_handler(void) {} + static inline void nouveau_unregister_dsm_handler(void) {} ++static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } ++static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } + #endif + + /* nouveau_backlight.c */ +@@ -1166,6 +1175,12 @@ int 
nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); + int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); + int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); + ++/* nv50_calc. */ ++int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, ++ int *N1, int *M1, int *N2, int *M2, int *P); ++int nv50_calc_pll2(struct drm_device *, struct pll_lims *, ++ int clk, int *N, int *fN, int *M, int *P); ++ + #ifndef ioread32_native + #ifdef __BIG_ENDIAN + #define ioread16_native ioread16be +diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h +index 9f28b94..e1df820 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h ++++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h +@@ -48,6 +48,8 @@ struct nouveau_encoder { + union { + struct { + int mc_unknown; ++ uint32_t unk0; ++ uint32_t unk1; + int dpcd_version; + int link_nr; + int link_bw; +diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h +index 4a3f31a..d432134 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fb.h ++++ b/drivers/gpu/drm/nouveau/nouveau_fb.h +@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb) + + extern const struct drm_mode_config_funcs nouveau_mode_config_funcs; + +-struct drm_framebuffer * +-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *, +- struct drm_mode_fb_cmd *); +- ++int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb, ++ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo); + #endif /* __NOUVEAU_FB_H__ */ +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +index 8e7dc1d..c9a4a0d 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +@@ -52,8 +52,8 @@ + static int + nouveau_fbcon_sync(struct fb_info *info) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = 
par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + int ret, i; +@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, +- .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, +@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, +- .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = nv04_fbcon_fillrect, + .fb_copyarea = nv04_fbcon_copyarea, + .fb_imageblit = nv04_fbcon_imageblit, +@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, +- .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = nv50_fbcon_fillrect, + .fb_copyarea = nv50_fbcon_copyarea, + .fb_imageblit = nv50_fbcon_imageblit, +@@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + *blue = nv_crtc->lut.b[regno]; + } + +-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { +- .gamma_set = nouveau_fbcon_gamma_set, +- .gamma_get = nouveau_fbcon_gamma_get +-}; +- +-#if defined(__i386__) || defined(__x86_64__) +-static bool +-nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev) ++static void ++nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev) + { +- struct pci_dev *pdev = dev->pdev; +- int ramin; +- +- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB && +- screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) +- return false; +- +- if (screen_info.lfb_base < pci_resource_start(pdev, 1)) +- goto not_fb; +- +- if (screen_info.lfb_base + 
screen_info.lfb_size >= +- pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1)) +- goto not_fb; +- +- return true; +-not_fb: +- ramin = 2; +- if (pci_resource_len(pdev, ramin) == 0) { +- ramin = 3; +- if (pci_resource_len(pdev, ramin) == 0) +- return false; +- } +- +- if (screen_info.lfb_base < pci_resource_start(pdev, ramin)) +- return false; +- +- if (screen_info.lfb_base + screen_info.lfb_size >= +- pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin)) +- return false; +- +- return true; +-} +-#endif +- +-void +-nouveau_fbcon_zfill(struct drm_device *dev) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct fb_info *info = dev_priv->fbdev_info; ++ struct fb_info *info = nfbdev->helper.fbdev; + struct fb_fillrect rect; + + /* Clear the entire fbcon. The drm will program every connector +@@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev) + } + + static int +-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, +- uint32_t fb_height, uint32_t surface_width, +- uint32_t surface_height, uint32_t surface_depth, +- uint32_t surface_bpp, struct drm_framebuffer **pfb) ++nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, ++ struct drm_fb_helper_surface_size *sizes) + { ++ struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct fb_info *info; +- struct nouveau_fbcon_par *par; + struct drm_framebuffer *fb; + struct nouveau_framebuffer *nouveau_fb; + struct nouveau_bo *nvbo; + struct drm_mode_fb_cmd mode_cmd; +- struct device *device = &dev->pdev->dev; ++ struct pci_dev *pdev = dev->pdev; ++ struct device *device = &pdev->dev; + int size, ret; + +- mode_cmd.width = surface_width; +- mode_cmd.height = surface_height; ++ mode_cmd.width = sizes->surface_width; ++ mode_cmd.height = sizes->surface_height; + +- mode_cmd.bpp = surface_bpp; ++ mode_cmd.bpp = sizes->surface_bpp; + mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); + mode_cmd.pitch = 
roundup(mode_cmd.pitch, 256); +- mode_cmd.depth = surface_depth; ++ mode_cmd.depth = sizes->surface_depth; + + size = mode_cmd.pitch * mode_cmd.height; + size = roundup(size, PAGE_SIZE); +@@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, + + mutex_lock(&dev->struct_mutex); + +- fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd); +- if (!fb) { ++ info = framebuffer_alloc(0, device); ++ if (!info) { + ret = -ENOMEM; +- NV_ERROR(dev, "failed to allocate fb.\n"); + goto out_unref; + } + +- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); +- +- nouveau_fb = nouveau_framebuffer(fb); +- *pfb = fb; +- +- info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device); +- if (!info) { ++ ret = fb_alloc_cmap(&info->cmap, 256, 0); ++ if (ret) { + ret = -ENOMEM; + goto out_unref; + } + +- par = info->par; +- par->helper.funcs = &nouveau_fbcon_helper_funcs; +- par->helper.dev = dev; +- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4); +- if (ret) +- goto out_unref; +- dev_priv->fbdev_info = info; ++ info->par = nfbdev; ++ ++ nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo); ++ ++ nouveau_fb = &nfbdev->nouveau_fb; ++ fb = &nouveau_fb->base; ++ ++ /* setup helper */ ++ nfbdev->helper.fb = fb; ++ nfbdev->helper.fbdev = info; + + strcpy(info->fix.id, "nouveaufb"); + if (nouveau_nofbaccel) +@@ -310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, + info->screen_size = size; + + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); +- drm_fb_helper_fill_var(info, fb, fb_width, fb_height); ++ drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); + + /* FIXME: we really shouldn't expose mmio space at all */ +- info->fix.mmio_start = pci_resource_start(dev->pdev, 1); +- info->fix.mmio_len = pci_resource_len(dev->pdev, 1); ++ info->fix.mmio_start = pci_resource_start(pdev, 1); ++ info->fix.mmio_len = pci_resource_len(pdev, 1); + + /* Set aperture base/size 
for vesafb takeover */ +-#if defined(__i386__) || defined(__x86_64__) +- if (nouveau_fbcon_has_vesafb_or_efifb(dev)) { +- /* Some NVIDIA VBIOS' are stupid and decide to put the +- * framebuffer in the middle of the PRAMIN BAR for +- * whatever reason. We need to know the exact lfb_base +- * to get vesafb kicked off, and the only reliable way +- * we have left is to find out lfb_base the same way +- * vesafb did. +- */ +- info->aperture_base = screen_info.lfb_base; +- info->aperture_size = screen_info.lfb_size; +- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) +- info->aperture_size *= 65536; +- } else +-#endif +- { +- info->aperture_base = info->fix.mmio_start; +- info->aperture_size = info->fix.mmio_len; ++ info->apertures = dev_priv->apertures; ++ if (!info->apertures) { ++ ret = -ENOMEM; ++ goto out_unref; + } + + info->pixmap.size = 64*1024; +@@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; + +- fb->fbdev = info; +- +- par->nouveau_fb = nouveau_fb; +- par->dev = dev; +- + if (dev_priv->channel && !nouveau_nofbaccel) { + switch (dev_priv->card_type) { + case NV_50: +@@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, + }; + } + +- nouveau_fbcon_zfill(dev); ++ nouveau_fbcon_zfill(dev, nfbdev); + + /* To allow resizeing without swapping buffers */ + NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", +@@ -379,44 +309,129 @@ out: + return ret; + } + +-int +-nouveau_fbcon_probe(struct drm_device *dev) ++static int ++nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, ++ struct drm_fb_helper_surface_size *sizes) + { +- NV_DEBUG_KMS(dev, "\n"); ++ struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper; ++ int new_fb = 0; ++ int ret; ++ ++ if (!helper->fb) { ++ ret = nouveau_fbcon_create(nfbdev, sizes); ++ if (ret) ++ return ret; ++ new_fb = 1; ++ } ++ return new_fb; ++} + +- return 
drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create); ++void ++nouveau_fbcon_output_poll_changed(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); + } + + int +-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) ++nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) + { +- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb); ++ struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; + struct fb_info *info; + +- if (!fb) +- return -EINVAL; +- +- info = fb->fbdev; +- if (info) { +- struct nouveau_fbcon_par *par = info->par; +- ++ if (nfbdev->helper.fbdev) { ++ info = nfbdev->helper.fbdev; + unregister_framebuffer(info); ++ if (info->cmap.len) ++ fb_dealloc_cmap(&info->cmap); ++ framebuffer_release(info); ++ } ++ ++ if (nouveau_fb->nvbo) { + nouveau_bo_unmap(nouveau_fb->nvbo); + drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); + nouveau_fb->nvbo = NULL; +- if (par) +- drm_fb_helper_free(&par->helper); +- framebuffer_release(info); + } +- ++ drm_fb_helper_fini(&nfbdev->helper); ++ drm_framebuffer_cleanup(&nouveau_fb->base); + return 0; + } + + void nouveau_fbcon_gpu_lockup(struct fb_info *info) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + } ++ ++static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { ++ .gamma_set = nouveau_fbcon_gamma_set, ++ .gamma_get = nouveau_fbcon_gamma_get, ++ .fb_probe = nouveau_fbcon_find_or_create_single, ++}; ++ ++ ++int nouveau_fbcon_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_fbdev *nfbdev; ++ int ret; ++ ++ nfbdev = kzalloc(sizeof(struct nouveau_fbdev), 
GFP_KERNEL); ++ if (!nfbdev) ++ return -ENOMEM; ++ ++ nfbdev->dev = dev; ++ dev_priv->nfbdev = nfbdev; ++ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; ++ ++ ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4); ++ if (ret) { ++ kfree(nfbdev); ++ return ret; ++ } ++ ++ drm_fb_helper_single_add_all_connectors(&nfbdev->helper); ++ drm_fb_helper_initial_config(&nfbdev->helper, 32); ++ return 0; ++} ++ ++void nouveau_fbcon_fini(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (!dev_priv->nfbdev) ++ return; ++ ++ nouveau_fbcon_destroy(dev, dev_priv->nfbdev); ++ kfree(dev_priv->nfbdev); ++ dev_priv->nfbdev = NULL; ++} ++ ++void nouveau_fbcon_save_disable_accel(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags; ++ dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; ++} ++ ++void nouveau_fbcon_restore_accel(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags; ++} ++ ++void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state); ++} ++ ++void nouveau_fbcon_zfill_all(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nouveau_fbcon_zfill(dev, dev_priv->nfbdev); ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h +index f9c34e1..e7e1268 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h +@@ -29,16 +29,16 @@ + + #include "drm_fb_helper.h" + +-struct nouveau_fbcon_par { ++#include "nouveau_fb.h" ++struct nouveau_fbdev { + struct drm_fb_helper helper; ++ struct nouveau_framebuffer nouveau_fb; ++ struct list_head fbdev_list; + struct 
drm_device *dev; +- struct nouveau_framebuffer *nouveau_fb; ++ unsigned int saved_flags; + }; + +-int nouveau_fbcon_probe(struct drm_device *dev); +-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); + void nouveau_fbcon_restore(void); +-void nouveau_fbcon_zfill(struct drm_device *dev); + + void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); + void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); + int nv50_fbcon_accel_init(struct fb_info *info); + + void nouveau_fbcon_gpu_lockup(struct fb_info *info); ++ ++int nouveau_fbcon_init(struct drm_device *dev); ++void nouveau_fbcon_fini(struct drm_device *dev); ++void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); ++void nouveau_fbcon_zfill_all(struct drm_device *dev); ++void nouveau_fbcon_save_disable_accel(struct drm_device *dev); ++void nouveau_fbcon_restore_accel(struct drm_device *dev); ++ ++void nouveau_fbcon_output_poll_changed(struct drm_device *dev); + #endif /* __NV50_FBCON_H__ */ + +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c +index 1bc0b38..69c76cf 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c +@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem) + } + + ttm_bo_unref(&bo); ++ ++ drm_gem_object_release(gem); ++ kfree(gem); + } + + int +@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, + + nvbo->channel = chan; + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, +- false, false); ++ false, false, false); + nvbo->channel = NULL; + if (unlikely(ret)) { + NV_ERROR(dev, "fail ttm_validate\n"); +diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c +index 32f0e49..f731c5f 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c ++++ 
b/drivers/gpu/drm/nouveau/nouveau_grctx.c +@@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev) + return ret; + } + +- pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL); ++ pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL); + if (!pgraph->ctxprog) { + NV_ERROR(dev, "OOM copying ctxprog\n"); + release_firmware(fw); + return -ENOMEM; + } +- memcpy(pgraph->ctxprog, fw->data, fw->size); + + cp = pgraph->ctxprog; + if (le32_to_cpu(cp->signature) != 0x5043564e || +@@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev) + return ret; + } + +- pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); ++ pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL); + if (!pgraph->ctxvals) { + NV_ERROR(dev, "OOM copying ctxvals\n"); + release_firmware(fw); + nouveau_grctx_fini(dev); + return -ENOMEM; + } +- memcpy(pgraph->ctxvals, fw->data, fw->size); + + cv = (void *)pgraph->ctxvals; + if (le32_to_cpu(cv->signature) != 0x5643564e || +diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c +index 88583e7..316a3c7 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c ++++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c +@@ -254,16 +254,27 @@ struct nouveau_i2c_chan * + nouveau_i2c_find(struct drm_device *dev, int index) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->vbios; ++ struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index]; + + if (index >= DCB_MAX_NUM_I2C_ENTRIES) + return NULL; + +- if (!bios->dcb.i2c[index].chan) { +- if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index)) +- return NULL; ++ if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { ++ uint32_t reg = 0xe500, val; ++ ++ if (i2c->port_type == 6) { ++ reg += i2c->read * 0x50; ++ val = 0x2002; ++ } else { ++ reg += ((i2c->entry & 0x1e00) >> 9) * 0x50; ++ val = 0xe001; ++ } ++ ++ nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val); + } + +- return bios->dcb.i2c[index].chan; ++ if (!i2c->chan && 
nouveau_i2c_init(dev, i2c, index)) ++ return NULL; ++ return i2c->chan; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c +index 13e73ce..53360f1 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_irq.c ++++ b/drivers/gpu/drm/nouveau/nouveau_irq.c +@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS) + { + struct drm_device *dev = (struct drm_device *)arg; + struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t status, fbdev_flags = 0; ++ uint32_t status; + unsigned long flags; + + status = nv_rd32(dev, NV03_PMC_INTR_0); +@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + +- if (dev_priv->fbdev_info) { +- fbdev_flags = dev_priv->fbdev_info->flags; +- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; +- } +- + if (status & NV_PMC_INTR_0_PFIFO_PENDING) { + nouveau_fifo_irq_handler(dev); + status &= ~NV_PMC_INTR_0_PFIFO_PENDING; +@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) + if (status) + NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); + +- if (dev_priv->fbdev_info) +- dev_priv->fbdev_info->flags = fbdev_flags; +- + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + + return IRQ_HANDLED; +diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c +index 775a701..c1fd42b 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_mem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_mem.c +@@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev) + dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); + dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; + if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) +- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; ++ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); ++ dev_priv->vram_sys_base <<= 12; + } + + NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); +diff --git 
a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h +index aa9b310..6ca80a3 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_reg.h ++++ b/drivers/gpu/drm/nouveau/nouveau_reg.h +@@ -826,6 +826,7 @@ + #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 + #define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) + #define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) ++#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) + #define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) + + #define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +index e171064..b02a231 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_state.c ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -34,6 +34,7 @@ + + #include "nouveau_drv.h" + #include "nouveau_drm.h" ++#include "nouveau_fbcon.h" + #include "nv50_display.h" + + static void nouveau_stub_takedown(struct drm_device *dev) {} +@@ -375,12 +376,15 @@ out_err: + static void nouveau_switcheroo_set_state(struct pci_dev *pdev, + enum vga_switcheroo_state state) + { ++ struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { + printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); + nouveau_pci_resume(pdev); ++ drm_kms_helper_poll_enable(dev); + } else { + printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); ++ drm_kms_helper_poll_disable(dev); + nouveau_pci_suspend(pdev, pmm); + } + } +@@ -515,8 +519,10 @@ nouveau_card_init(struct drm_device *dev) + + dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; + +- if (drm_core_check_feature(dev, DRIVER_MODESET)) +- drm_helper_initial_config(dev); ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ nouveau_fbcon_init(dev); ++ drm_kms_helper_poll_init(dev); ++ } + + return 0; + +@@ -563,6 +569,7 @@ static void 
nouveau_card_takedown(struct drm_device *dev) + NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); + + if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { ++ + nouveau_backlight_exit(dev); + + if (dev_priv->channel) { +@@ -637,6 +644,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev) + #endif + } + ++static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev) ++{ ++ struct pci_dev *pdev = dev->pdev; ++ struct apertures_struct *aper = alloc_apertures(3); ++ if (!aper) ++ return NULL; ++ ++ aper->ranges[0].base = pci_resource_start(pdev, 1); ++ aper->ranges[0].size = pci_resource_len(pdev, 1); ++ aper->count = 1; ++ ++ if (pci_resource_len(pdev, 2)) { ++ aper->ranges[aper->count].base = pci_resource_start(pdev, 2); ++ aper->ranges[aper->count].size = pci_resource_len(pdev, 2); ++ aper->count++; ++ } ++ ++ if (pci_resource_len(pdev, 3)) { ++ aper->ranges[aper->count].base = pci_resource_start(pdev, 3); ++ aper->ranges[aper->count].size = pci_resource_len(pdev, 3); ++ aper->count++; ++ } ++ ++ return aper; ++} ++ ++static int nouveau_remove_conflicting_drivers(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ bool primary = false; ++ dev_priv->apertures = nouveau_get_apertures(dev); ++ if (!dev_priv->apertures) ++ return -ENOMEM; ++ ++#ifdef CONFIG_X86 ++ primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; ++#endif ++ ++ remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary); ++ return 0; ++} ++ + int nouveau_load(struct drm_device *dev, unsigned long flags) + { + struct drm_nouveau_private *dev_priv; +@@ -724,29 +773,30 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) + NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", + dev_priv->card_type, reg0); + +- /* map larger RAMIN aperture on NV40 cards */ +- dev_priv->ramin = NULL; ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ int ret = 
nouveau_remove_conflicting_drivers(dev); ++ if (ret) ++ return ret; ++ } ++ ++ /* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */ + if (dev_priv->card_type >= NV_40) { + int ramin_bar = 2; + if (pci_resource_len(dev->pdev, ramin_bar) == 0) + ramin_bar = 3; + + dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar); +- dev_priv->ramin = ioremap( +- pci_resource_start(dev->pdev, ramin_bar), ++ dev_priv->ramin = ++ ioremap(pci_resource_start(dev->pdev, ramin_bar), + dev_priv->ramin_size); + if (!dev_priv->ramin) { +- NV_ERROR(dev, "Failed to init RAMIN mapping, " +- "limited instance memory available\n"); ++ NV_ERROR(dev, "Failed to PRAMIN BAR"); ++ return -ENOMEM; + } +- } +- +- /* On older cards (or if the above failed), create a map covering +- * the BAR0 PRAMIN aperture */ +- if (!dev_priv->ramin) { ++ } else { + dev_priv->ramin_size = 1 * 1024 * 1024; + dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN, +- dev_priv->ramin_size); ++ dev_priv->ramin_size); + if (!dev_priv->ramin) { + NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n"); + return -ENOMEM; +@@ -794,6 +844,8 @@ int nouveau_unload(struct drm_device *dev) + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ drm_kms_helper_poll_fini(dev); ++ nouveau_fbcon_fini(dev); + if (dev_priv->card_type >= NV_50) + nv50_display_destroy(dev); + else +@@ -859,6 +911,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, + case NOUVEAU_GETPARAM_VM_VRAM_BASE: + getparam->value = dev_priv->vm_vram_base; + break; ++ case NOUVEAU_GETPARAM_PTIMER_TIME: ++ getparam->value = dev_priv->engine.timer.read(dev); ++ break; + case NOUVEAU_GETPARAM_GRAPH_UNITS: + /* NV40 and NV50 versions are quite different, but register + * address is the same. 
User is supposed to know the card +diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c +index 89a91b9..aaf3de3 100644 +--- a/drivers/gpu/drm/nouveau/nv04_cursor.c ++++ b/drivers/gpu/drm/nouveau/nv04_cursor.c +@@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) + static void + nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) + { ++ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; + NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, + NV_PRAMDAC_CU_START_POS, + XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | +diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c +index 813b25c..1eeac4f 100644 +--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c +@@ -30,8 +30,8 @@ + void + nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + +@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) + void + nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + +@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) + void + nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = 
nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + uint32_t fg; +@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) + int + nv04_fbcon_accel_init(struct fb_info *info) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + const int sub = NvSubCtxSurf2D; +@@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info) + if (ret) + return ret; + +- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? ++ ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ? + 0x009f : 0x005f, NvImageBlit); + if (ret) + return ret; +diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c +index e260986..618355e 100644 +--- a/drivers/gpu/drm/nouveau/nv04_graph.c ++++ b/drivers/gpu/drm/nouveau/nv04_graph.c +@@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, + return 0; + } + +-static int +-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++/* ++ * Software methods, why they are needed, and how they all work: ++ * ++ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some ++ * 2d engine settings are kept inside the grobjs themselves. The grobjs are ++ * 3 words long on both. grobj format on NV04 is: ++ * ++ * word 0: ++ * - bits 0-7: class ++ * - bit 12: color key active ++ * - bit 13: clip rect active ++ * - bit 14: if set, destination surface is swizzled and taken from buffer 5 ++ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken ++ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or ++ * NV03_CONTEXT_SURFACE_DST]. 
++ * - bits 15-17: 2d operation [aka patch config] ++ * - bit 24: patch valid [enables rendering using this object] ++ * - bit 25: surf3d valid [for tex_tri and multitex_tri only] ++ * word 1: ++ * - bits 0-1: mono format ++ * - bits 8-13: color format ++ * - bits 16-31: DMA_NOTIFY instance ++ * word 2: ++ * - bits 0-15: DMA_A instance ++ * - bits 16-31: DMA_B instance ++ * ++ * On NV05 it's: ++ * ++ * word 0: ++ * - bits 0-7: class ++ * - bit 12: color key active ++ * - bit 13: clip rect active ++ * - bit 14: if set, destination surface is swizzled and taken from buffer 5 ++ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken ++ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or ++ * NV03_CONTEXT_SURFACE_DST]. ++ * - bits 15-17: 2d operation [aka patch config] ++ * - bits 20-22: dither mode ++ * - bit 24: patch valid [enables rendering using this object] ++ * - bit 25: surface_dst/surface_color/surf2d/surf3d valid ++ * - bit 26: surface_src/surface_zeta valid ++ * - bit 27: pattern valid ++ * - bit 28: rop valid ++ * - bit 29: beta1 valid ++ * - bit 30: beta4 valid ++ * word 1: ++ * - bits 0-1: mono format ++ * - bits 8-13: color format ++ * - bits 16-31: DMA_NOTIFY instance ++ * word 2: ++ * - bits 0-15: DMA_A instance ++ * - bits 16-31: DMA_B instance ++ * ++ * NV05 will set/unset the relevant valid bits when you poke the relevant ++ * object-binding methods with object of the proper type, or with the NULL ++ * type. It'll only allow rendering using the grobj if all needed objects ++ * are bound. The needed set of objects depends on selected operation: for ++ * example rop object is needed by ROP_AND, but not by SRCCOPY_AND. ++ * ++ * NV04 doesn't have these methods implemented at all, and doesn't have the ++ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24 ++ * is set. So we have to emulate them in software, internally keeping the ++ * same bits as NV05 does. 
Since grobjs are aligned to 16 bytes on nv04, ++ * but the last word isn't actually used for anything, we abuse it for this ++ * purpose. ++ * ++ * Actually, NV05 can optionally check bit 24 too, but we disable this since ++ * there's no use for it. ++ * ++ * For unknown reasons, NV04 implements surf3d binding in hardware as an ++ * exception. Also for unknown reasons, NV04 doesn't implement the clipping ++ * methods on the surf3d object, so we have to emulate them too. ++ */ ++ ++static void ++nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) + { + struct drm_device *dev = chan->dev; + uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; +@@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, + uint32_t tmp; + + tmp = nv_ri32(dev, instance); +- tmp &= ~0x00038000; +- tmp |= ((data & 7) << 15); ++ tmp &= ~mask; ++ tmp |= value; + + nv_wi32(dev, instance, tmp); + nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); + nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp); ++} ++ ++static void ++nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value) ++{ ++ struct drm_device *dev = chan->dev; ++ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; ++ uint32_t tmp, ctx1; ++ int class, op, valid = 1; ++ ++ ctx1 = nv_ri32(dev, instance); ++ class = ctx1 & 0xff; ++ op = (ctx1 >> 15) & 7; ++ tmp = nv_ri32(dev, instance + 0xc); ++ tmp &= ~mask; ++ tmp |= value; ++ nv_wi32(dev, instance + 0xc, tmp); ++ ++ /* check for valid surf2d/surf_dst/surf_color */ ++ if (!(tmp & 0x02000000)) ++ valid = 0; ++ /* check for valid surf_src/surf_zeta */ ++ if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000)) ++ valid = 0; ++ ++ switch (op) { ++ /* SRCCOPY_AND, SRCCOPY: no extra objects required */ ++ case 0: ++ case 3: ++ break; ++ /* ROP_AND: requires pattern and rop */ ++ case 1: ++ if (!(tmp & 0x18000000)) ++ valid = 0; ++ break; ++ /* BLEND_AND: 
requires beta1 */ ++ case 2: ++ if (!(tmp & 0x20000000)) ++ valid = 0; ++ break; ++ /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */ ++ case 4: ++ case 5: ++ if (!(tmp & 0x40000000)) ++ valid = 0; ++ break; ++ } ++ ++ nv04_graph_set_ctx1(chan, 0x01000000, valid << 24); ++} ++ ++static int ++nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ if (data > 5) ++ return 1; ++ /* Old versions of the objects only accept first three operations. */ ++ if (data > 2 && grclass < 0x40) ++ return 1; ++ nv04_graph_set_ctx1(chan, 0x00038000, data << 15); ++ /* changing operation changes set of objects needed for validation */ ++ nv04_graph_set_ctx_val(chan, 0, 0); ++ return 0; ++} ++ ++static int ++nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ uint32_t min = data & 0xffff, max; ++ uint32_t w = data >> 16; ++ if (min & 0x8000) ++ /* too large */ ++ return 1; ++ if (w & 0x8000) ++ /* yes, it accepts negative for some reason. */ ++ w |= 0xffff0000; ++ max = min + w; ++ max &= 0x3ffff; ++ nv_wr32(chan->dev, 0x40053c, min); ++ nv_wr32(chan->dev, 0x400544, max); ++ return 0; ++} ++ ++static int ++nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ uint32_t min = data & 0xffff, max; ++ uint32_t w = data >> 16; ++ if (min & 0x8000) ++ /* too large */ ++ return 1; ++ if (w & 0x8000) ++ /* yes, it accepts negative for some reason. 
*/ ++ w |= 0xffff0000; ++ max = min + w; ++ max &= 0x3ffff; ++ nv_wr32(chan->dev, 0x400540, min); ++ nv_wr32(chan->dev, 0x400548, max); + return 0; + } + ++static int ++nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx1(chan, 0x00004000, 0); ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0); ++ return 0; ++ case 0x42: ++ nv04_graph_set_ctx1(chan, 0x00004000, 0); ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx1(chan, 0x00004000, 0); ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0); ++ return 0; ++ case 0x42: ++ nv04_graph_set_ctx1(chan, 0x00004000, 0); ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); ++ return 0; ++ case 0x52: ++ nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000); ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x08000000, 0); ++ return 0; ++ case 0x18: ++ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x08000000, 0); ++ return 0; ++ case 0x44: ++ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, ++ int mthd, 
uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x10000000, 0); ++ return 0; ++ case 0x43: ++ nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x20000000, 0); ++ return 0; ++ case 0x12: ++ nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x40000000, 0); ++ return 0; ++ case 0x72: ++ nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0); ++ return 0; ++ case 0x58: ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x04000000, 0); ++ return 0; ++ case 0x59: ++ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0); ++ return 0; ++ case 0x5a: ++ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); ++ return 0; 
++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx_val(chan, 0x04000000, 0); ++ return 0; ++ case 0x5b: ++ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx1(chan, 0x2000, 0); ++ return 0; ++ case 0x19: ++ nv04_graph_set_ctx1(chan, 0x2000, 0x2000); ++ return 0; ++ } ++ return 1; ++} ++ ++static int ++nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, ++ int mthd, uint32_t data) ++{ ++ switch (nv_ri32(chan->dev, data << 4) & 0xff) { ++ case 0x30: ++ nv04_graph_set_ctx1(chan, 0x1000, 0); ++ return 0; ++ /* Yes, for some reason even the old versions of objects ++ * accept 0x57 and not 0x17. Consistency be damned. 
++ */ ++ case 0x57: ++ nv04_graph_set_ctx1(chan, 0x1000, 0x1000); ++ return 0; ++ } ++ return 1; ++} ++ + static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { + { 0x0150, nv04_graph_mthd_set_ref }, + {} + }; + +-static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = { ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = { ++ { 0x0184, nv04_graph_mthd_bind_nv01_patt }, ++ { 0x0188, nv04_graph_mthd_bind_rop }, ++ { 0x018c, nv04_graph_mthd_bind_beta1 }, ++ { 0x0190, nv04_graph_mthd_bind_surf_dst }, ++ { 0x02fc, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = { ++ { 0x0188, nv04_graph_mthd_bind_nv04_patt }, ++ { 0x018c, nv04_graph_mthd_bind_rop }, ++ { 0x0190, nv04_graph_mthd_bind_beta1 }, ++ { 0x0194, nv04_graph_mthd_bind_beta4 }, ++ { 0x0198, nv04_graph_mthd_bind_surf2d }, ++ { 0x02fc, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = { ++ { 0x0184, nv04_graph_mthd_bind_chroma }, ++ { 0x0188, nv04_graph_mthd_bind_clip }, ++ { 0x018c, nv04_graph_mthd_bind_nv01_patt }, ++ { 0x0190, nv04_graph_mthd_bind_rop }, ++ { 0x0194, nv04_graph_mthd_bind_beta1 }, ++ { 0x0198, nv04_graph_mthd_bind_surf_dst }, ++ { 0x019c, nv04_graph_mthd_bind_surf_src }, ++ { 0x02fc, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = { ++ { 0x0184, nv04_graph_mthd_bind_chroma }, ++ { 0x0188, nv04_graph_mthd_bind_clip }, ++ { 0x018c, nv04_graph_mthd_bind_nv04_patt }, ++ { 0x0190, nv04_graph_mthd_bind_rop }, ++ { 0x0194, nv04_graph_mthd_bind_beta1 }, ++ { 0x0198, nv04_graph_mthd_bind_beta4 }, ++ { 0x019c, nv04_graph_mthd_bind_surf2d }, ++ { 0x02fc, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = { ++ { 0x0188, 
nv04_graph_mthd_bind_chroma }, ++ { 0x018c, nv04_graph_mthd_bind_clip }, ++ { 0x0190, nv04_graph_mthd_bind_nv04_patt }, ++ { 0x0194, nv04_graph_mthd_bind_rop }, ++ { 0x0198, nv04_graph_mthd_bind_beta1 }, ++ { 0x019c, nv04_graph_mthd_bind_beta4 }, ++ { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, ++ { 0x03e4, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = { ++ { 0x0184, nv04_graph_mthd_bind_chroma }, ++ { 0x0188, nv04_graph_mthd_bind_clip }, ++ { 0x018c, nv04_graph_mthd_bind_nv01_patt }, ++ { 0x0190, nv04_graph_mthd_bind_rop }, ++ { 0x0194, nv04_graph_mthd_bind_beta1 }, ++ { 0x0198, nv04_graph_mthd_bind_surf_dst }, ++ { 0x02fc, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = { ++ { 0x0184, nv04_graph_mthd_bind_chroma }, ++ { 0x0188, nv04_graph_mthd_bind_nv01_patt }, ++ { 0x018c, nv04_graph_mthd_bind_rop }, ++ { 0x0190, nv04_graph_mthd_bind_beta1 }, ++ { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, + }; + ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = { ++ { 0x0184, nv04_graph_mthd_bind_chroma }, ++ { 0x0188, nv04_graph_mthd_bind_nv04_patt }, ++ { 0x018c, nv04_graph_mthd_bind_rop }, ++ { 0x0190, nv04_graph_mthd_bind_beta1 }, ++ { 0x0194, nv04_graph_mthd_bind_beta4 }, ++ { 0x0198, nv04_graph_mthd_bind_surf2d }, ++ { 0x02fc, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = { ++ { 0x0188, nv04_graph_mthd_bind_nv01_patt }, ++ { 0x018c, nv04_graph_mthd_bind_rop }, ++ { 0x0190, nv04_graph_mthd_bind_beta1 }, ++ { 0x0194, nv04_graph_mthd_bind_surf_dst }, ++ { 0x0304, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = { ++ { 0x0188, nv04_graph_mthd_bind_nv04_patt }, ++ { 0x018c, nv04_graph_mthd_bind_rop 
}, ++ { 0x0190, nv04_graph_mthd_bind_beta1 }, ++ { 0x0194, nv04_graph_mthd_bind_beta4 }, ++ { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf }, ++ { 0x0304, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = { ++ { 0x0184, nv04_graph_mthd_bind_clip }, ++ { 0x0188, nv04_graph_mthd_bind_nv01_patt }, ++ { 0x018c, nv04_graph_mthd_bind_rop }, ++ { 0x0190, nv04_graph_mthd_bind_beta1 }, ++ { 0x0194, nv04_graph_mthd_bind_surf_dst }, ++ { 0x02fc, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = { ++ { 0x0184, nv04_graph_mthd_bind_clip }, ++ { 0x0188, nv04_graph_mthd_bind_nv04_patt }, ++ { 0x018c, nv04_graph_mthd_bind_rop }, ++ { 0x0190, nv04_graph_mthd_bind_beta1 }, ++ { 0x0194, nv04_graph_mthd_bind_beta4 }, ++ { 0x0198, nv04_graph_mthd_bind_surf2d }, ++ { 0x02fc, nv04_graph_mthd_set_operation }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = { ++ { 0x0188, nv04_graph_mthd_bind_clip }, ++ { 0x018c, nv04_graph_mthd_bind_surf_color }, ++ { 0x0190, nv04_graph_mthd_bind_surf_zeta }, ++ {}, ++}; ++ ++static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { ++ { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, ++ { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, ++ {}, ++}; ++ + struct nouveau_pgraph_object_class nv04_graph_grclass[] = { +- { 0x0039, false, NULL }, +- { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ +- { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ +- { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ +- { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */ ++ { 0x0038, false, NULL }, /* dvd subpicture */ ++ { 0x0039, false, NULL }, /* m2mf */ ++ { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ ++ { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ ++ { 0x001f, false, 
nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ ++ { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ ++ { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ ++ { 0x0064, false, NULL }, /* nv05 iifc */ ++ { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ ++ { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ ++ { 0x0065, false, NULL }, /* nv05 ifc */ ++ { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ ++ { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ ++ { 0x0066, false, NULL }, /* nv05 sifc */ ++ { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ ++ { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ + { 0x0030, false, NULL }, /* null */ + { 0x0042, false, NULL }, /* surf2d */ + { 0x0043, false, NULL }, /* rop */ + { 0x0012, false, NULL }, /* beta1 */ + { 0x0072, false, NULL }, /* beta4 */ + { 0x0019, false, NULL }, /* cliprect */ +- { 0x0044, false, NULL }, /* pattern */ ++ { 0x0018, false, NULL }, /* nv01 pattern */ ++ { 0x0044, false, NULL }, /* nv04 pattern */ + { 0x0052, false, NULL }, /* swzsurf */ +- { 0x0053, false, NULL }, /* surf3d */ ++ { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */ ++ { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ + { 0x0054, false, NULL }, /* tex_tri */ + { 0x0055, false, NULL }, /* multitex_tri */ ++ { 0x0017, false, NULL }, /* nv01 chroma */ ++ { 0x0057, false, NULL }, /* nv04 chroma */ ++ { 0x0058, false, NULL }, /* surf_dst */ ++ { 0x0059, false, NULL }, /* surf_src */ ++ { 0x005a, false, NULL }, /* surf_color */ ++ { 0x005b, false, NULL }, /* surf_zeta */ ++ { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */ ++ { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */ ++ { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ ++ { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ ++ { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 
rect */ ++ { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ + { 0x506e, true, nv04_graph_mthds_sw }, + {} + }; +diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c +index 0616c96..704a25d 100644 +--- a/drivers/gpu/drm/nouveau/nv40_graph.c ++++ b/drivers/gpu/drm/nouveau/nv40_graph.c +@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev) + + if (!dev_priv->engine.graph.ctxprog) { + struct nouveau_grctx ctx = {}; +- uint32_t cp[256]; ++ uint32_t *cp; ++ ++ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); ++ if (!cp) ++ return -ENOMEM; + + ctx.dev = dev; + ctx.mode = NOUVEAU_GRCTX_PROG; +@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev) + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); + for (i = 0; i < ctx.ctxprog_len; i++) + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); ++ ++ kfree(cp); + } + + /* No context present currently */ +diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c +index 11b11c3..9b5c974 100644 +--- a/drivers/gpu/drm/nouveau/nv40_grctx.c ++++ b/drivers/gpu/drm/nouveau/nv40_grctx.c +@@ -115,11 +115,6 @@ + + /* TODO: + * - get vs count from 0x1540 +- * - document unimplemented bits compared to nvidia +- * - nsource handling +- * - R0 & 0x0200 handling +- * - single-vs handling +- * - 400314 bit 0 + */ + + static int +diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c +new file mode 100644 +index 0000000..2cdc2bf +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_calc.c +@@ -0,0 +1,87 @@ ++/* ++ * Copyright 2010 Red Hat Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "drm_fixed.h" ++#include "nouveau_drv.h" ++#include "nouveau_hw.h" ++ ++int ++nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk, ++ int *N1, int *M1, int *N2, int *M2, int *P) ++{ ++ struct nouveau_pll_vals pll_vals; ++ int ret; ++ ++ ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals); ++ if (ret <= 0) ++ return ret; ++ ++ *N1 = pll_vals.N1; ++ *M1 = pll_vals.M1; ++ *N2 = pll_vals.N2; ++ *M2 = pll_vals.M2; ++ *P = pll_vals.log2P; ++ return ret; ++} ++ ++int ++nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, ++ int *N, int *fN, int *M, int *P) ++{ ++ fixed20_12 fb_div, a, b; ++ ++ *P = pll->vco1.maxfreq / clk; ++ if (*P > pll->max_p) ++ *P = pll->max_p; ++ if (*P < pll->min_p) ++ *P = pll->min_p; ++ ++ /* *M = ceil(refclk / pll->vco.max_inputfreq); */ ++ a.full = dfixed_const(pll->refclk); ++ b.full = dfixed_const(pll->vco1.max_inputfreq); ++ a.full = dfixed_div(a, b); ++ a.full = dfixed_ceil(a); ++ *M = dfixed_trunc(a); ++ ++ /* fb_div = (vco * *M) / refclk; */ ++ fb_div.full = dfixed_const(clk * *P); ++ fb_div.full = dfixed_mul(fb_div, a); ++ a.full = dfixed_const(pll->refclk); ++ fb_div.full = dfixed_div(fb_div, a); ++ ++ /* *N = floor(fb_div); */ ++ a.full = dfixed_floor(fb_div); ++ *N = dfixed_trunc(fb_div); ++ ++ /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */ ++ b.full = dfixed_const(8192); ++ a.full = dfixed_mul(a, b); ++ fb_div.full = dfixed_mul(fb_div, b); ++ fb_div.full = fb_div.full - a.full; ++ *fN = dfixed_trunc(fb_div) - 4096; ++ *fN &= 0xffff; ++ ++ return clk; ++} +diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c +index cfabeb9..b4e4a3b 100644 +--- a/drivers/gpu/drm/nouveau/nv50_crtc.c ++++ b/drivers/gpu/drm/nouveau/nv50_crtc.c +@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update) + int + nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) + { 
+- uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); +- struct nouveau_pll_vals pll; +- struct pll_lims limits; ++ uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); ++ struct pll_lims pll; + uint32_t reg1, reg2; +- int ret; ++ int ret, N1, M1, N2, M2, P; + +- ret = get_pll_limits(dev, pll_reg, &limits); ++ ret = get_pll_limits(dev, reg, &pll); + if (ret) + return ret; + +- ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll); +- if (ret <= 0) +- return ret; ++ if (pll.vco2.maxfreq) { ++ ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P); ++ if (ret <= 0) ++ return 0; ++ ++ NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n", ++ pclk, ret, N1, M1, N2, M2, P); + +- if (limits.vco2.maxfreq) { +- reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00; +- reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00; +- nv_wr32(dev, pll_reg, 0x10000611); +- nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1); +- nv_wr32(dev, pll_reg + 8, +- reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2); ++ reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00; ++ reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00; ++ nv_wr32(dev, reg, 0x10000611); ++ nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1); ++ nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); + } else { +- reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000; +- nv_wr32(dev, pll_reg, 0x50000610); +- nv_wr32(dev, pll_reg + 4, reg1 | +- (pll.log2P << 16) | (pll.M1 << 8) | pll.N1); ++ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); ++ if (ret <= 0) ++ return 0; ++ ++ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", ++ pclk, ret, N1, N2, M1, P); ++ ++ reg1 = nv_rd32(dev, reg + 4) & 0xffc00000; ++ nv_wr32(dev, reg, 0x50000610); ++ nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); ++ nv_wr32(dev, reg + 8, N2); + } + + return 0; +diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c +index 753e723..03ad7ab 100644 +--- a/drivers/gpu/drm/nouveau/nv50_cursor.c ++++ 
b/drivers/gpu/drm/nouveau/nv50_cursor.c +@@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) + { + struct drm_device *dev = nv_crtc->base.dev; + ++ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; + nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), + ((y & 0xFFFF) << 16) | (x & 0xFFFF)); + /* Needed to make the cursor move. */ +diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c +index 649db4c..580a5d1 100644 +--- a/drivers/gpu/drm/nouveau/nv50_display.c ++++ b/drivers/gpu/drm/nouveau/nv50_display.c +@@ -29,6 +29,7 @@ + #include "nouveau_encoder.h" + #include "nouveau_connector.h" + #include "nouveau_fb.h" ++#include "nouveau_fbcon.h" + #include "drm_crtc_helper.h" + + static void +@@ -783,6 +784,37 @@ ack: + } + + static void ++nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb) ++{ ++ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); ++ struct drm_encoder *encoder; ++ uint32_t tmp, unk0 = 0, unk1 = 0; ++ ++ if (dcb->type != OUTPUT_DP) ++ return; ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); ++ ++ if (nv_encoder->dcb == dcb) { ++ unk0 = nv_encoder->dp.unk0; ++ unk1 = nv_encoder->dp.unk1; ++ break; ++ } ++ } ++ ++ if (unk0 || unk1) { ++ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); ++ tmp &= 0xfffffe03; ++ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0); ++ ++ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); ++ tmp &= 0xfef080c0; ++ nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1); ++ } ++} ++ ++static void + nv50_display_unk20_handler(struct drm_device *dev) + { + struct dcb_entry *dcbent; +@@ -805,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev) + + nouveau_bios_run_display_table(dev, dcbent, script, pclk); + ++ nv50_display_unk20_dp_hack(dev, dcbent); ++ + tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); + tmp 
&= ~0x000000f; + nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); +@@ -945,6 +979,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) + nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); + if (dev_priv->chipset >= 0x90) + nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); ++ ++ drm_helper_hpd_irq_event(dev); + } + + void +diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c +index a95e694..32611bd 100644 +--- a/drivers/gpu/drm/nouveau/nv50_fb.c ++++ b/drivers/gpu/drm/nouveau/nv50_fb.c +@@ -6,10 +6,16 @@ + int + nv50_fb_init(struct drm_device *dev) + { +- /* This is needed to get meaningful information from 100c90 +- * on traps. No idea what these values mean exactly. */ + struct drm_nouveau_private *dev_priv = dev->dev_private; + ++ /* Not a clue what this is exactly. Without pointing it at a ++ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) ++ * cause IOMMU "read from address 0" errors (rh#561267) ++ */ ++ nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); ++ ++ /* This is needed to get meaningful information from 100c90 ++ * on traps. No idea what these values mean exactly. 
*/ + switch (dev_priv->chipset) { + case 0x50: + nv_wr32(dev, 0x100c90, 0x0707ff); +diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c +index a8c70e7..6bf025c 100644 +--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c +@@ -6,8 +6,8 @@ + void + nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + +@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) + void + nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + +@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) + void + nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + uint32_t width, dwords, *data = (uint32_t *)image->data; +@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + int + nv50_fbcon_accel_init(struct fb_info *info) + { +- struct nouveau_fbcon_par *par = info->par; +- struct drm_device *dev = par->dev; ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; + struct 
drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + struct nouveau_gpuobj *eng2d = NULL; +diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c +index c61782b..bb47ad7 100644 +--- a/drivers/gpu/drm/nouveau/nv50_gpio.c ++++ b/drivers/gpu/drm/nouveau/nv50_gpio.c +@@ -31,7 +31,7 @@ nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) + { + const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; + +- if (gpio->line > 32) ++ if (gpio->line >= 32) + return -EINVAL; + + *reg = nv50_gpio_reg[gpio->line >> 3]; +diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c +index 0c68698..812778d 100644 +--- a/drivers/gpu/drm/nouveau/nv50_sor.c ++++ b/drivers/gpu/drm/nouveau/nv50_sor.c +@@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { + int + nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_encoder *nv_encoder = NULL; + struct drm_encoder *encoder; + bool dum; +@@ -321,18 +320,19 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) + encoder->possible_clones = 0; + + if (nv_encoder->dcb->type == OUTPUT_DP) { +- uint32_t mc, or = nv_encoder->or; ++ int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); ++ uint32_t tmp; + +- if (dev_priv->chipset < 0x90 || +- dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) +- mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or)); +- else +- mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or)); ++ tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); + +- switch ((mc & 0x00000f00) >> 8) { ++ switch ((tmp & 0x00000f00) >> 8) { + case 8: + case 9: +- nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16; ++ nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16; ++ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); ++ nv_encoder->dp.unk0 = tmp & 
0x000001fc; ++ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); ++ nv_encoder->dp.unk1 = tmp & 0x010f7f3f; + break; + default: + break; +diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile +index 3c91312..84b1f27 100644 +--- a/drivers/gpu/drm/radeon/Makefile ++++ b/drivers/gpu/drm/radeon/Makefile +@@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable + $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable + $(call if_changed,mkregtable) + ++$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable ++ $(call if_changed,mkregtable) ++ + $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h + + $(obj)/r200.o: $(obj)/r200_reg_safe.h +@@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h + + $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h + ++$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h ++ + radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ + radeon_irq.o r300_cmdbuf.o r600_cp.o + # add KMS driver +@@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ + rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ + r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ + r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ +- evergreen.o ++ evergreen.o evergreen_cs.o + + radeon-$(CONFIG_COMPAT) += radeon_ioc32.o + radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o +diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h +index 27e2c71..2ebcb97 100644 +--- a/drivers/gpu/drm/radeon/atombios.h ++++ b/drivers/gpu/drm/radeon/atombios.h +@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER + #define ATOM_PP_THERMALCONTROLLER_RV6xx 7 + #define ATOM_PP_THERMALCONTROLLER_RV770 8 + #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 ++#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 ++#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 ++#define 
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller + + typedef struct _ATOM_PPLIB_STATE + { +@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE + UCHAR ucClockStateIndices[1]; // variable-sized + } ATOM_PPLIB_STATE; + ++typedef struct _ATOM_PPLIB_FANTABLE ++{ ++ UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same. ++ UCHAR ucTHyst; // Temperature hysteresis. Integer. ++ USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. ++ USHORT usTMed; // The middle temperature where we change slopes. ++ USHORT usTHigh; // The high point above TMed for adjusting the second slope. ++ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments). ++ USHORT usPWMMed; // The PWM value (in percent) at TMed. ++ USHORT usPWMHigh; // The PWM value at THigh. ++} ATOM_PPLIB_FANTABLE; ++ ++typedef struct _ATOM_PPLIB_EXTENDEDHEADER ++{ ++ USHORT usSize; ++ ULONG ulMaxEngineClock; // For Overdrive. ++ ULONG ulMaxMemoryClock; // For Overdrive. ++ // Add extra system parameters here, always adjust size to include all fields. ++} ATOM_PPLIB_EXTENDEDHEADER; ++ + //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps + #define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 + #define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 +@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE + #define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 + #define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 + #define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 ++#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096 ++#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition. ++#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). ++#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC. 
++#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. ++#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state. + + typedef struct _ATOM_PPLIB_POWERPLAYTABLE + { +@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE + + } ATOM_PPLIB_POWERPLAYTABLE; + ++typedef struct _ATOM_PPLIB_POWERPLAYTABLE2 ++{ ++ ATOM_PPLIB_POWERPLAYTABLE basicTable; ++ UCHAR ucNumCustomThermalPolicy; ++ USHORT usCustomThermalPolicyArrayOffset; ++}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2; ++ ++typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 ++{ ++ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2; ++ USHORT usFormatID; // To be used ONLY by PPGen. ++ USHORT usFanTableOffset; ++ USHORT usExtendendedHeaderOffset; ++} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; ++ + //// ATOM_PPLIB_NONCLOCK_INFO::usClassification + #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 + #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 +@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE + #define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 + #define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 + #define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 +-// remaining 3 bits are reserved ++#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000 ++#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 ++#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 + + //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings + #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 +@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE + + #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 + #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 ++#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 + #define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 + +-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 ++//memory related flags ++#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000 ++ ++//M3 Arb //2bits, current 3 sets of parameters in total ++#define 
ATOM_PPLIB_M3ARB_MASK 0x00060000 ++#define ATOM_PPLIB_M3ARB_SHIFT 17 + + // Contained in an array starting at the offset + // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. +@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO + // Contained in an array starting at the offset + // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. + // referenced from ATOM_PPLIB_STATE::ucClockStateIndices ++#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 ++#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 ++ + typedef struct _ATOM_PPLIB_R600_CLOCK_INFO + { + USHORT usEngineClockLow; +@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO + #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 + #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 + #define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 ++#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0). ++ ++typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO ++{ ++ USHORT usEngineClockLow; ++ UCHAR ucEngineClockHigh; ++ ++ USHORT usMemoryClockLow; ++ UCHAR ucMemoryClockHigh; ++ ++ USHORT usVDDC; ++ USHORT usVDDCI; ++ USHORT usUnused; ++ ++ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* ++ ++} ATOM_PPLIB_EVERGREEN_CLOCK_INFO; + + typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index a87990b..f3f2827 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -26,7 +26,7 @@ + #include + #include + #include +-#include "radeon_fixed.h" ++#include + #include "radeon.h" + #include "atom.h" + #include "atom-bits.h" +@@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) + + switch (mode) { + case DRM_MODE_DPMS_ON: ++ radeon_crtc->enabled = true; ++ /* adjust pm to dpms changes BEFORE enabling crtcs */ ++ radeon_pm_compute_clocks(rdev); + atombios_enable_crtc(crtc, ATOM_ENABLE); + if (ASIC_IS_DCE3(rdev)) + atombios_enable_crtc_memreq(crtc, 
ATOM_ENABLE); + atombios_blank_crtc(crtc, ATOM_DISABLE); +- /* XXX re-enable when interrupt support is added */ +- if (!ASIC_IS_DCE4(rdev)) +- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); ++ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + radeon_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: +- /* XXX re-enable when interrupt support is added */ +- if (!ASIC_IS_DCE4(rdev)) +- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); ++ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); + atombios_blank_crtc(crtc, ATOM_ENABLE); + if (ASIC_IS_DCE3(rdev)) + atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); + atombios_enable_crtc(crtc, ATOM_DISABLE); ++ radeon_crtc->enabled = false; ++ /* adjust pm to dpms changes AFTER disabling crtcs */ ++ radeon_pm_compute_clocks(rdev); + break; + } + } +@@ -705,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode + break; + case ATOM_DCPLL: + case ATOM_PPLL_INVALID: ++ default: + pll = &rdev->clock.dcpll; + break; + } +@@ -1160,6 +1163,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) + { ++ struct drm_device *dev = crtc->dev; ++ struct radeon_device *rdev = dev->dev_private; ++ ++ /* adjust pm to upcoming mode change */ ++ radeon_pm_compute_clocks(rdev); ++ + if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; + return true; +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c +index 28b31c6..abffb14 100644 +--- a/drivers/gpu/drm/radeon/atombios_dp.c ++++ b/drivers/gpu/drm/radeon/atombios_dp.c +@@ -351,7 +351,7 @@ retry: + args.v1.ucChannelID = chan->rec.i2c_id; + args.v1.ucDelay = delay / 10; + if (ASIC_IS_DCE4(rdev)) +- args.v2.ucHPD_ID = chan->rec.hpd_id; ++ args.v2.ucHPD_ID = chan->rec.hpd; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t 
*)&args); + +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c +index e8f447e..4b6623d 100644 +--- a/drivers/gpu/drm/radeon/evergreen.c ++++ b/drivers/gpu/drm/radeon/evergreen.c +@@ -28,39 +28,246 @@ + #include "radeon.h" + #include "radeon_asic.h" + #include "radeon_drm.h" +-#include "rv770d.h" ++#include "evergreend.h" + #include "atom.h" + #include "avivod.h" + #include "evergreen_reg.h" + ++#define EVERGREEN_PFP_UCODE_SIZE 1120 ++#define EVERGREEN_PM4_UCODE_SIZE 1376 ++ + static void evergreen_gpu_init(struct radeon_device *rdev); + void evergreen_fini(struct radeon_device *rdev); + ++void evergreen_pm_misc(struct radeon_device *rdev) ++{ ++ int req_ps_idx = rdev->pm.requested_power_state_index; ++ int req_cm_idx = rdev->pm.requested_clock_mode_index; ++ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; ++ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; ++ ++ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { ++ if (voltage->voltage != rdev->pm.current_vddc) { ++ radeon_atom_set_voltage(rdev, voltage->voltage); ++ rdev->pm.current_vddc = voltage->voltage; ++ DRM_DEBUG("Setting: v: %d\n", voltage->voltage); ++ } ++ } ++} ++ ++void evergreen_pm_prepare(struct radeon_device *rdev) ++{ ++ struct drm_device *ddev = rdev->ddev; ++ struct drm_crtc *crtc; ++ struct radeon_crtc *radeon_crtc; ++ u32 tmp; ++ ++ /* disable any active CRTCs */ ++ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { ++ radeon_crtc = to_radeon_crtc(crtc); ++ if (radeon_crtc->enabled) { ++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); ++ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; ++ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); ++ } ++ } ++} ++ ++void evergreen_pm_finish(struct radeon_device *rdev) ++{ ++ struct drm_device *ddev = rdev->ddev; ++ struct drm_crtc *crtc; ++ struct radeon_crtc *radeon_crtc; ++ u32 tmp; ++ ++ /* enable any active CRTCs */ ++ 
list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { ++ radeon_crtc = to_radeon_crtc(crtc); ++ if (radeon_crtc->enabled) { ++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); ++ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; ++ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); ++ } ++ } ++} ++ + bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) + { + bool connected = false; +- /* XXX */ ++ ++ switch (hpd) { ++ case RADEON_HPD_1: ++ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) ++ connected = true; ++ break; ++ case RADEON_HPD_2: ++ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) ++ connected = true; ++ break; ++ case RADEON_HPD_3: ++ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) ++ connected = true; ++ break; ++ case RADEON_HPD_4: ++ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) ++ connected = true; ++ break; ++ case RADEON_HPD_5: ++ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) ++ connected = true; ++ break; ++ case RADEON_HPD_6: ++ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) ++ connected = true; ++ break; ++ default: ++ break; ++ } ++ + return connected; + } + + void evergreen_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd) + { +- /* XXX */ ++ u32 tmp; ++ bool connected = evergreen_hpd_sense(rdev, hpd); ++ ++ switch (hpd) { ++ case RADEON_HPD_1: ++ tmp = RREG32(DC_HPD1_INT_CONTROL); ++ if (connected) ++ tmp &= ~DC_HPDx_INT_POLARITY; ++ else ++ tmp |= DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD1_INT_CONTROL, tmp); ++ break; ++ case RADEON_HPD_2: ++ tmp = RREG32(DC_HPD2_INT_CONTROL); ++ if (connected) ++ tmp &= ~DC_HPDx_INT_POLARITY; ++ else ++ tmp |= DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD2_INT_CONTROL, tmp); ++ break; ++ case RADEON_HPD_3: ++ tmp = RREG32(DC_HPD3_INT_CONTROL); ++ if (connected) ++ tmp &= ~DC_HPDx_INT_POLARITY; ++ else ++ tmp |= DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD3_INT_CONTROL, tmp); ++ break; ++ case RADEON_HPD_4: ++ tmp = 
RREG32(DC_HPD4_INT_CONTROL); ++ if (connected) ++ tmp &= ~DC_HPDx_INT_POLARITY; ++ else ++ tmp |= DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD4_INT_CONTROL, tmp); ++ break; ++ case RADEON_HPD_5: ++ tmp = RREG32(DC_HPD5_INT_CONTROL); ++ if (connected) ++ tmp &= ~DC_HPDx_INT_POLARITY; ++ else ++ tmp |= DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD5_INT_CONTROL, tmp); ++ break; ++ case RADEON_HPD_6: ++ tmp = RREG32(DC_HPD6_INT_CONTROL); ++ if (connected) ++ tmp &= ~DC_HPDx_INT_POLARITY; ++ else ++ tmp |= DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD6_INT_CONTROL, tmp); ++ break; ++ default: ++ break; ++ } + } + + void evergreen_hpd_init(struct radeon_device *rdev) + { +- /* XXX */ ++ struct drm_device *dev = rdev->ddev; ++ struct drm_connector *connector; ++ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | ++ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ struct radeon_connector *radeon_connector = to_radeon_connector(connector); ++ switch (radeon_connector->hpd.hpd) { ++ case RADEON_HPD_1: ++ WREG32(DC_HPD1_CONTROL, tmp); ++ rdev->irq.hpd[0] = true; ++ break; ++ case RADEON_HPD_2: ++ WREG32(DC_HPD2_CONTROL, tmp); ++ rdev->irq.hpd[1] = true; ++ break; ++ case RADEON_HPD_3: ++ WREG32(DC_HPD3_CONTROL, tmp); ++ rdev->irq.hpd[2] = true; ++ break; ++ case RADEON_HPD_4: ++ WREG32(DC_HPD4_CONTROL, tmp); ++ rdev->irq.hpd[3] = true; ++ break; ++ case RADEON_HPD_5: ++ WREG32(DC_HPD5_CONTROL, tmp); ++ rdev->irq.hpd[4] = true; ++ break; ++ case RADEON_HPD_6: ++ WREG32(DC_HPD6_CONTROL, tmp); ++ rdev->irq.hpd[5] = true; ++ break; ++ default: ++ break; ++ } ++ } ++ if (rdev->irq.installed) ++ evergreen_irq_set(rdev); + } + +- +-void evergreen_bandwidth_update(struct radeon_device *rdev) ++void evergreen_hpd_fini(struct radeon_device *rdev) + { +- /* XXX */ ++ struct drm_device *dev = rdev->ddev; ++ struct drm_connector *connector; ++ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ struct 
radeon_connector *radeon_connector = to_radeon_connector(connector); ++ switch (radeon_connector->hpd.hpd) { ++ case RADEON_HPD_1: ++ WREG32(DC_HPD1_CONTROL, 0); ++ rdev->irq.hpd[0] = false; ++ break; ++ case RADEON_HPD_2: ++ WREG32(DC_HPD2_CONTROL, 0); ++ rdev->irq.hpd[1] = false; ++ break; ++ case RADEON_HPD_3: ++ WREG32(DC_HPD3_CONTROL, 0); ++ rdev->irq.hpd[2] = false; ++ break; ++ case RADEON_HPD_4: ++ WREG32(DC_HPD4_CONTROL, 0); ++ rdev->irq.hpd[3] = false; ++ break; ++ case RADEON_HPD_5: ++ WREG32(DC_HPD5_CONTROL, 0); ++ rdev->irq.hpd[4] = false; ++ break; ++ case RADEON_HPD_6: ++ WREG32(DC_HPD6_CONTROL, 0); ++ rdev->irq.hpd[5] = false; ++ break; ++ default: ++ break; ++ } ++ } + } + +-void evergreen_hpd_fini(struct radeon_device *rdev) ++void evergreen_bandwidth_update(struct radeon_device *rdev) + { + /* XXX */ + } +@@ -83,10 +290,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) + /* + * GART + */ ++void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev) ++{ ++ unsigned i; ++ u32 tmp; ++ ++ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); ++ for (i = 0; i < rdev->usec_timeout; i++) { ++ /* read MC_STATUS */ ++ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE); ++ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT; ++ if (tmp == 2) { ++ printk(KERN_WARNING "[drm] r600 flush TLB failed\n"); ++ return; ++ } ++ if (tmp) { ++ return; ++ } ++ udelay(1); ++ } ++} ++ + int evergreen_pcie_gart_enable(struct radeon_device *rdev) + { + u32 tmp; +- int r, i; ++ int r; + + if (rdev->gart.table.vram.robj == NULL) { + dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); +@@ -121,10 +349,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); + WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, + (u32)(rdev->dummy_page.addr >> 12)); +- for (i = 1; i < 7; i++) +- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); ++ WREG32(VM_CONTEXT1_CNTL, 0); + +- r600_pcie_gart_tlb_flush(rdev); ++ 
evergreen_pcie_gart_tlb_flush(rdev); + rdev->gart.ready = true; + return 0; + } +@@ -132,11 +359,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) + void evergreen_pcie_gart_disable(struct radeon_device *rdev) + { + u32 tmp; +- int i, r; ++ int r; + + /* Disable all tables */ +- for (i = 0; i < 7; i++) +- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); ++ WREG32(VM_CONTEXT0_CNTL, 0); ++ WREG32(VM_CONTEXT1_CNTL, 0); + + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | +@@ -173,7 +400,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev) + void evergreen_agp_enable(struct radeon_device *rdev) + { + u32 tmp; +- int i; + + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | +@@ -193,8 +419,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) + WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); +- for (i = 0; i < 7; i++) +- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); ++ WREG32(VM_CONTEXT0_CNTL, 0); ++ WREG32(VM_CONTEXT1_CNTL, 0); + } + + static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) +@@ -400,40 +626,656 @@ static void evergreen_mc_program(struct radeon_device *rdev) + rv515_vga_render_disable(rdev); + } + +-#if 0 + /* + * CP. 
+ */ +-static void evergreen_cp_stop(struct radeon_device *rdev) +-{ +- /* XXX */ +-} +- + + static int evergreen_cp_load_microcode(struct radeon_device *rdev) + { +- /* XXX */ ++ const __be32 *fw_data; ++ int i; + ++ if (!rdev->me_fw || !rdev->pfp_fw) ++ return -EINVAL; ++ ++ r700_cp_stop(rdev); ++ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); ++ ++ fw_data = (const __be32 *)rdev->pfp_fw->data; ++ WREG32(CP_PFP_UCODE_ADDR, 0); ++ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++) ++ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); ++ WREG32(CP_PFP_UCODE_ADDR, 0); ++ ++ fw_data = (const __be32 *)rdev->me_fw->data; ++ WREG32(CP_ME_RAM_WADDR, 0); ++ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++) ++ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); ++ ++ WREG32(CP_PFP_UCODE_ADDR, 0); ++ WREG32(CP_ME_RAM_WADDR, 0); ++ WREG32(CP_ME_RAM_RADDR, 0); + return 0; + } + ++int evergreen_cp_resume(struct radeon_device *rdev) ++{ ++ u32 tmp; ++ u32 rb_bufsz; ++ int r; ++ ++ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ ++ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | ++ SOFT_RESET_PA | ++ SOFT_RESET_SH | ++ SOFT_RESET_VGT | ++ SOFT_RESET_SX)); ++ RREG32(GRBM_SOFT_RESET); ++ mdelay(15); ++ WREG32(GRBM_SOFT_RESET, 0); ++ RREG32(GRBM_SOFT_RESET); ++ ++ /* Set ring buffer size */ ++ rb_bufsz = drm_order(rdev->cp.ring_size / 8); ++ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; ++#ifdef __BIG_ENDIAN ++ tmp |= BUF_SWAP_32BIT; ++#endif ++ WREG32(CP_RB_CNTL, tmp); ++ WREG32(CP_SEM_WAIT_TIMER, 0x4); ++ ++ /* Set the write pointer delay */ ++ WREG32(CP_RB_WPTR_DELAY, 0); ++ ++ /* Initialize the ring buffer's read and write pointers */ ++ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); ++ WREG32(CP_RB_RPTR_WR, 0); ++ WREG32(CP_RB_WPTR, 0); ++ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); ++ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); ++ mdelay(1); ++ WREG32(CP_RB_CNTL, tmp); ++ ++ WREG32(CP_RB_BASE, 
rdev->cp.gpu_addr >> 8); ++ WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); ++ ++ rdev->cp.rptr = RREG32(CP_RB_RPTR); ++ rdev->cp.wptr = RREG32(CP_RB_WPTR); ++ ++ r600_cp_start(rdev); ++ rdev->cp.ready = true; ++ r = radeon_ring_test(rdev); ++ if (r) { ++ rdev->cp.ready = false; ++ return r; ++ } ++ return 0; ++} + + /* + * Core functions + */ +-static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, ++static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, ++ u32 num_tile_pipes, + u32 num_backends, + u32 backend_disable_mask) + { + u32 backend_map = 0; ++ u32 enabled_backends_mask = 0; ++ u32 enabled_backends_count = 0; ++ u32 cur_pipe; ++ u32 swizzle_pipe[EVERGREEN_MAX_PIPES]; ++ u32 cur_backend = 0; ++ u32 i; ++ bool force_no_swizzle; ++ ++ if (num_tile_pipes > EVERGREEN_MAX_PIPES) ++ num_tile_pipes = EVERGREEN_MAX_PIPES; ++ if (num_tile_pipes < 1) ++ num_tile_pipes = 1; ++ if (num_backends > EVERGREEN_MAX_BACKENDS) ++ num_backends = EVERGREEN_MAX_BACKENDS; ++ if (num_backends < 1) ++ num_backends = 1; ++ ++ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { ++ if (((backend_disable_mask >> i) & 1) == 0) { ++ enabled_backends_mask |= (1 << i); ++ ++enabled_backends_count; ++ } ++ if (enabled_backends_count == num_backends) ++ break; ++ } ++ ++ if (enabled_backends_count == 0) { ++ enabled_backends_mask = 1; ++ enabled_backends_count = 1; ++ } ++ ++ if (enabled_backends_count != num_backends) ++ num_backends = enabled_backends_count; ++ ++ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES); ++ switch (rdev->family) { ++ case CHIP_CEDAR: ++ case CHIP_REDWOOD: ++ force_no_swizzle = false; ++ break; ++ case CHIP_CYPRESS: ++ case CHIP_HEMLOCK: ++ case CHIP_JUNIPER: ++ default: ++ force_no_swizzle = true; ++ break; ++ } ++ if (force_no_swizzle) { ++ bool last_backend_enabled = false; ++ ++ force_no_swizzle = false; ++ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { ++ if (((enabled_backends_mask >> i) & 1) == 1) { ++ if 
(last_backend_enabled) ++ force_no_swizzle = true; ++ last_backend_enabled = true; ++ } else ++ last_backend_enabled = false; ++ } ++ } ++ ++ switch (num_tile_pipes) { ++ case 1: ++ case 3: ++ case 5: ++ case 7: ++ DRM_ERROR("odd number of pipes!\n"); ++ break; ++ case 2: ++ swizzle_pipe[0] = 0; ++ swizzle_pipe[1] = 1; ++ break; ++ case 4: ++ if (force_no_swizzle) { ++ swizzle_pipe[0] = 0; ++ swizzle_pipe[1] = 1; ++ swizzle_pipe[2] = 2; ++ swizzle_pipe[3] = 3; ++ } else { ++ swizzle_pipe[0] = 0; ++ swizzle_pipe[1] = 2; ++ swizzle_pipe[2] = 1; ++ swizzle_pipe[3] = 3; ++ } ++ break; ++ case 6: ++ if (force_no_swizzle) { ++ swizzle_pipe[0] = 0; ++ swizzle_pipe[1] = 1; ++ swizzle_pipe[2] = 2; ++ swizzle_pipe[3] = 3; ++ swizzle_pipe[4] = 4; ++ swizzle_pipe[5] = 5; ++ } else { ++ swizzle_pipe[0] = 0; ++ swizzle_pipe[1] = 2; ++ swizzle_pipe[2] = 4; ++ swizzle_pipe[3] = 1; ++ swizzle_pipe[4] = 3; ++ swizzle_pipe[5] = 5; ++ } ++ break; ++ case 8: ++ if (force_no_swizzle) { ++ swizzle_pipe[0] = 0; ++ swizzle_pipe[1] = 1; ++ swizzle_pipe[2] = 2; ++ swizzle_pipe[3] = 3; ++ swizzle_pipe[4] = 4; ++ swizzle_pipe[5] = 5; ++ swizzle_pipe[6] = 6; ++ swizzle_pipe[7] = 7; ++ } else { ++ swizzle_pipe[0] = 0; ++ swizzle_pipe[1] = 2; ++ swizzle_pipe[2] = 4; ++ swizzle_pipe[3] = 6; ++ swizzle_pipe[4] = 1; ++ swizzle_pipe[5] = 3; ++ swizzle_pipe[6] = 5; ++ swizzle_pipe[7] = 7; ++ } ++ break; ++ } ++ ++ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { ++ while (((1 << cur_backend) & enabled_backends_mask) == 0) ++ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; ++ ++ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); ++ ++ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; ++ } + + return backend_map; + } +-#endif + + static void evergreen_gpu_init(struct radeon_device *rdev) + { +- /* XXX */ ++ u32 cc_rb_backend_disable = 0; ++ u32 cc_gc_shader_pipe_config; ++ u32 gb_addr_config = 0; ++ u32 mc_shared_chmap, mc_arb_ramcfg; ++ u32 
gb_backend_map; ++ u32 grbm_gfx_index; ++ u32 sx_debug_1; ++ u32 smx_dc_ctl0; ++ u32 sq_config; ++ u32 sq_lds_resource_mgmt; ++ u32 sq_gpr_resource_mgmt_1; ++ u32 sq_gpr_resource_mgmt_2; ++ u32 sq_gpr_resource_mgmt_3; ++ u32 sq_thread_resource_mgmt; ++ u32 sq_thread_resource_mgmt_2; ++ u32 sq_stack_resource_mgmt_1; ++ u32 sq_stack_resource_mgmt_2; ++ u32 sq_stack_resource_mgmt_3; ++ u32 vgt_cache_invalidation; ++ u32 hdp_host_path_cntl; ++ int i, j, num_shader_engines, ps_thread_count; ++ ++ switch (rdev->family) { ++ case CHIP_CYPRESS: ++ case CHIP_HEMLOCK: ++ rdev->config.evergreen.num_ses = 2; ++ rdev->config.evergreen.max_pipes = 4; ++ rdev->config.evergreen.max_tile_pipes = 8; ++ rdev->config.evergreen.max_simds = 10; ++ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; ++ rdev->config.evergreen.max_gprs = 256; ++ rdev->config.evergreen.max_threads = 248; ++ rdev->config.evergreen.max_gs_threads = 32; ++ rdev->config.evergreen.max_stack_entries = 512; ++ rdev->config.evergreen.sx_num_of_sets = 4; ++ rdev->config.evergreen.sx_max_export_size = 256; ++ rdev->config.evergreen.sx_max_export_pos_size = 64; ++ rdev->config.evergreen.sx_max_export_smx_size = 192; ++ rdev->config.evergreen.max_hw_contexts = 8; ++ rdev->config.evergreen.sq_num_cf_insts = 2; ++ ++ rdev->config.evergreen.sc_prim_fifo_size = 0x100; ++ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; ++ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; ++ break; ++ case CHIP_JUNIPER: ++ rdev->config.evergreen.num_ses = 1; ++ rdev->config.evergreen.max_pipes = 4; ++ rdev->config.evergreen.max_tile_pipes = 4; ++ rdev->config.evergreen.max_simds = 10; ++ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; ++ rdev->config.evergreen.max_gprs = 256; ++ rdev->config.evergreen.max_threads = 248; ++ rdev->config.evergreen.max_gs_threads = 32; ++ rdev->config.evergreen.max_stack_entries = 512; ++ rdev->config.evergreen.sx_num_of_sets = 4; ++ 
rdev->config.evergreen.sx_max_export_size = 256; ++ rdev->config.evergreen.sx_max_export_pos_size = 64; ++ rdev->config.evergreen.sx_max_export_smx_size = 192; ++ rdev->config.evergreen.max_hw_contexts = 8; ++ rdev->config.evergreen.sq_num_cf_insts = 2; ++ ++ rdev->config.evergreen.sc_prim_fifo_size = 0x100; ++ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; ++ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; ++ break; ++ case CHIP_REDWOOD: ++ rdev->config.evergreen.num_ses = 1; ++ rdev->config.evergreen.max_pipes = 4; ++ rdev->config.evergreen.max_tile_pipes = 4; ++ rdev->config.evergreen.max_simds = 5; ++ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; ++ rdev->config.evergreen.max_gprs = 256; ++ rdev->config.evergreen.max_threads = 248; ++ rdev->config.evergreen.max_gs_threads = 32; ++ rdev->config.evergreen.max_stack_entries = 256; ++ rdev->config.evergreen.sx_num_of_sets = 4; ++ rdev->config.evergreen.sx_max_export_size = 256; ++ rdev->config.evergreen.sx_max_export_pos_size = 64; ++ rdev->config.evergreen.sx_max_export_smx_size = 192; ++ rdev->config.evergreen.max_hw_contexts = 8; ++ rdev->config.evergreen.sq_num_cf_insts = 2; ++ ++ rdev->config.evergreen.sc_prim_fifo_size = 0x100; ++ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; ++ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; ++ break; ++ case CHIP_CEDAR: ++ default: ++ rdev->config.evergreen.num_ses = 1; ++ rdev->config.evergreen.max_pipes = 2; ++ rdev->config.evergreen.max_tile_pipes = 2; ++ rdev->config.evergreen.max_simds = 2; ++ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; ++ rdev->config.evergreen.max_gprs = 256; ++ rdev->config.evergreen.max_threads = 192; ++ rdev->config.evergreen.max_gs_threads = 16; ++ rdev->config.evergreen.max_stack_entries = 256; ++ rdev->config.evergreen.sx_num_of_sets = 4; ++ rdev->config.evergreen.sx_max_export_size = 128; ++ rdev->config.evergreen.sx_max_export_pos_size = 32; ++ 
rdev->config.evergreen.sx_max_export_smx_size = 96; ++ rdev->config.evergreen.max_hw_contexts = 4; ++ rdev->config.evergreen.sq_num_cf_insts = 1; ++ ++ rdev->config.evergreen.sc_prim_fifo_size = 0x40; ++ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; ++ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; ++ break; ++ } ++ ++ /* Initialize HDP */ ++ for (i = 0, j = 0; i < 32; i++, j += 0x18) { ++ WREG32((0x2c14 + j), 0x00000000); ++ WREG32((0x2c18 + j), 0x00000000); ++ WREG32((0x2c1c + j), 0x00000000); ++ WREG32((0x2c20 + j), 0x00000000); ++ WREG32((0x2c24 + j), 0x00000000); ++ } ++ ++ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); ++ ++ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; ++ ++ cc_gc_shader_pipe_config |= ++ INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) ++ & EVERGREEN_MAX_PIPES_MASK); ++ cc_gc_shader_pipe_config |= ++ INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) ++ & EVERGREEN_MAX_SIMDS_MASK); ++ ++ cc_rb_backend_disable = ++ BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) ++ & EVERGREEN_MAX_BACKENDS_MASK); ++ ++ ++ mc_shared_chmap = RREG32(MC_SHARED_CHMAP); ++ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); ++ ++ switch (rdev->config.evergreen.max_tile_pipes) { ++ case 1: ++ default: ++ gb_addr_config |= NUM_PIPES(0); ++ break; ++ case 2: ++ gb_addr_config |= NUM_PIPES(1); ++ break; ++ case 4: ++ gb_addr_config |= NUM_PIPES(2); ++ break; ++ case 8: ++ gb_addr_config |= NUM_PIPES(3); ++ break; ++ } ++ ++ gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); ++ gb_addr_config |= BANK_INTERLEAVE_SIZE(0); ++ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); ++ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); ++ gb_addr_config |= NUM_GPUS(0); /* Hemlock? 
*/ ++ gb_addr_config |= MULTI_GPU_TILE_SIZE(2); ++ ++ if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) ++ gb_addr_config |= ROW_SIZE(2); ++ else ++ gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); ++ ++ if (rdev->ddev->pdev->device == 0x689e) { ++ u32 efuse_straps_4; ++ u32 efuse_straps_3; ++ u8 efuse_box_bit_131_124; ++ ++ WREG32(RCU_IND_INDEX, 0x204); ++ efuse_straps_4 = RREG32(RCU_IND_DATA); ++ WREG32(RCU_IND_INDEX, 0x203); ++ efuse_straps_3 = RREG32(RCU_IND_DATA); ++ efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); ++ ++ switch(efuse_box_bit_131_124) { ++ case 0x00: ++ gb_backend_map = 0x76543210; ++ break; ++ case 0x55: ++ gb_backend_map = 0x77553311; ++ break; ++ case 0x56: ++ gb_backend_map = 0x77553300; ++ break; ++ case 0x59: ++ gb_backend_map = 0x77552211; ++ break; ++ case 0x66: ++ gb_backend_map = 0x77443300; ++ break; ++ case 0x99: ++ gb_backend_map = 0x66552211; ++ break; ++ case 0x5a: ++ gb_backend_map = 0x77552200; ++ break; ++ case 0xaa: ++ gb_backend_map = 0x66442200; ++ break; ++ case 0x95: ++ gb_backend_map = 0x66553311; ++ break; ++ default: ++ DRM_ERROR("bad backend map, using default\n"); ++ gb_backend_map = ++ evergreen_get_tile_pipe_to_backend_map(rdev, ++ rdev->config.evergreen.max_tile_pipes, ++ rdev->config.evergreen.max_backends, ++ ((EVERGREEN_MAX_BACKENDS_MASK << ++ rdev->config.evergreen.max_backends) & ++ EVERGREEN_MAX_BACKENDS_MASK)); ++ break; ++ } ++ } else if (rdev->ddev->pdev->device == 0x68b9) { ++ u32 efuse_straps_3; ++ u8 efuse_box_bit_127_124; ++ ++ WREG32(RCU_IND_INDEX, 0x203); ++ efuse_straps_3 = RREG32(RCU_IND_DATA); ++ efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; ++ ++ switch(efuse_box_bit_127_124) { ++ case 0x0: ++ gb_backend_map = 0x00003210; ++ break; ++ case 0x5: ++ case 0x6: ++ case 0x9: ++ case 0xa: ++ gb_backend_map = 0x00003311; ++ break; ++ default: ++ DRM_ERROR("bad backend map, using default\n"); 
++ gb_backend_map = ++ evergreen_get_tile_pipe_to_backend_map(rdev, ++ rdev->config.evergreen.max_tile_pipes, ++ rdev->config.evergreen.max_backends, ++ ((EVERGREEN_MAX_BACKENDS_MASK << ++ rdev->config.evergreen.max_backends) & ++ EVERGREEN_MAX_BACKENDS_MASK)); ++ break; ++ } ++ } else ++ gb_backend_map = ++ evergreen_get_tile_pipe_to_backend_map(rdev, ++ rdev->config.evergreen.max_tile_pipes, ++ rdev->config.evergreen.max_backends, ++ ((EVERGREEN_MAX_BACKENDS_MASK << ++ rdev->config.evergreen.max_backends) & ++ EVERGREEN_MAX_BACKENDS_MASK)); ++ ++ WREG32(GB_BACKEND_MAP, gb_backend_map); ++ WREG32(GB_ADDR_CONFIG, gb_addr_config); ++ WREG32(DMIF_ADDR_CONFIG, gb_addr_config); ++ WREG32(HDP_ADDR_CONFIG, gb_addr_config); ++ ++ num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; ++ grbm_gfx_index = INSTANCE_BROADCAST_WRITES; ++ ++ for (i = 0; i < rdev->config.evergreen.num_ses; i++) { ++ u32 rb = cc_rb_backend_disable | (0xf0 << 16); ++ u32 sp = cc_gc_shader_pipe_config; ++ u32 gfx = grbm_gfx_index | SE_INDEX(i); ++ ++ if (i == num_shader_engines) { ++ rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); ++ sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); ++ } ++ ++ WREG32(GRBM_GFX_INDEX, gfx); ++ WREG32(RLC_GFX_INDEX, gfx); ++ ++ WREG32(CC_RB_BACKEND_DISABLE, rb); ++ WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); ++ WREG32(GC_USER_RB_BACKEND_DISABLE, rb); ++ WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); ++ } ++ ++ grbm_gfx_index |= SE_BROADCAST_WRITES; ++ WREG32(GRBM_GFX_INDEX, grbm_gfx_index); ++ WREG32(RLC_GFX_INDEX, grbm_gfx_index); ++ ++ WREG32(CGTS_SYS_TCC_DISABLE, 0); ++ WREG32(CGTS_TCC_DISABLE, 0); ++ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); ++ WREG32(CGTS_USER_TCC_DISABLE, 0); ++ ++ /* set HW defaults for 3D engine */ ++ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ++ ROQ_IB2_START(0x2b))); ++ ++ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); ++ ++ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | ++ SYNC_GRADIENT | ++ SYNC_WALKER | ++ SYNC_ALIGNER)); ++ 
++ sx_debug_1 = RREG32(SX_DEBUG_1); ++ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; ++ WREG32(SX_DEBUG_1, sx_debug_1); ++ ++ ++ smx_dc_ctl0 = RREG32(SMX_DC_CTL0); ++ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); ++ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); ++ WREG32(SMX_DC_CTL0, smx_dc_ctl0); ++ ++ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | ++ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | ++ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); ++ ++ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | ++ SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | ++ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); ++ ++ WREG32(VGT_NUM_INSTANCES, 1); ++ WREG32(SPI_CONFIG_CNTL, 0); ++ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); ++ WREG32(CP_PERFMON_CNTL, 0); ++ ++ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | ++ FETCH_FIFO_HIWATER(0x4) | ++ DONE_FIFO_HIWATER(0xe0) | ++ ALU_UPDATE_FIFO_HIWATER(0x8))); ++ ++ sq_config = RREG32(SQ_CONFIG); ++ sq_config &= ~(PS_PRIO(3) | ++ VS_PRIO(3) | ++ GS_PRIO(3) | ++ ES_PRIO(3)); ++ sq_config |= (VC_ENABLE | ++ EXPORT_SRC_C | ++ PS_PRIO(0) | ++ VS_PRIO(1) | ++ GS_PRIO(2) | ++ ES_PRIO(3)); ++ ++ if (rdev->family == CHIP_CEDAR) ++ /* no vertex cache */ ++ sq_config &= ~VC_ENABLE; ++ ++ sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); ++ ++ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32); ++ sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); ++ sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); ++ sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); ++ sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); ++ 
sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); ++ sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); ++ ++ if (rdev->family == CHIP_CEDAR) ++ ps_thread_count = 96; ++ else ++ ps_thread_count = 128; ++ ++ sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); ++ sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; ++ sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; ++ sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; ++ sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; ++ sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; ++ ++ sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); ++ sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); ++ sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); ++ sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); ++ sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); ++ sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); ++ ++ WREG32(SQ_CONFIG, sq_config); ++ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); ++ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); ++ WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3); ++ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); ++ WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2); ++ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); ++ 
WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); ++ WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3); ++ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); ++ WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt); ++ ++ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | ++ FORCE_EOV_MAX_REZ_CNT(255))); ++ ++ if (rdev->family == CHIP_CEDAR) ++ vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); ++ else ++ vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); ++ vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); ++ WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); ++ ++ WREG32(VGT_GS_VERTEX_REUSE, 16); ++ WREG32(PA_SC_LINE_STIPPLE_STATE, 0); ++ ++ WREG32(CB_PERF_CTR0_SEL_0, 0); ++ WREG32(CB_PERF_CTR0_SEL_1, 0); ++ WREG32(CB_PERF_CTR1_SEL_0, 0); ++ WREG32(CB_PERF_CTR1_SEL_1, 0); ++ WREG32(CB_PERF_CTR2_SEL_0, 0); ++ WREG32(CB_PERF_CTR2_SEL_1, 0); ++ WREG32(CB_PERF_CTR3_SEL_0, 0); ++ WREG32(CB_PERF_CTR3_SEL_1, 0); ++ ++ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); ++ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); ++ ++ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); ++ ++ udelay(50); ++ + } + + int evergreen_mc_init(struct radeon_device *rdev) +@@ -476,26 +1318,627 @@ int evergreen_mc_init(struct radeon_device *rdev) + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.visible_vram_size = rdev->mc.aper_size; +- /* FIXME remove this once we support unmappable VRAM */ +- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { +- rdev->mc.mc_vram_size = rdev->mc.aper_size; +- rdev->mc.real_vram_size = rdev->mc.aper_size; +- } + r600_vram_gtt_location(rdev, &rdev->mc); + radeon_update_bandwidth_info(rdev); + + return 0; + } + +-int evergreen_gpu_reset(struct radeon_device *rdev) ++bool evergreen_gpu_is_lockup(struct radeon_device *rdev) + { + /* FIXME: implement for evergreen */ ++ return false; ++} ++ ++static int 
evergreen_gpu_soft_reset(struct radeon_device *rdev) ++{ ++ struct evergreen_mc_save save; ++ u32 srbm_reset = 0; ++ u32 grbm_reset = 0; ++ ++ dev_info(rdev->dev, "GPU softreset \n"); ++ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", ++ RREG32(GRBM_STATUS)); ++ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", ++ RREG32(GRBM_STATUS_SE0)); ++ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", ++ RREG32(GRBM_STATUS_SE1)); ++ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", ++ RREG32(SRBM_STATUS)); ++ evergreen_mc_stop(rdev, &save); ++ if (evergreen_mc_wait_for_idle(rdev)) { ++ dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); ++ } ++ /* Disable CP parsing/prefetching */ ++ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); ++ ++ /* reset all the gfx blocks */ ++ grbm_reset = (SOFT_RESET_CP | ++ SOFT_RESET_CB | ++ SOFT_RESET_DB | ++ SOFT_RESET_PA | ++ SOFT_RESET_SC | ++ SOFT_RESET_SPI | ++ SOFT_RESET_SH | ++ SOFT_RESET_SX | ++ SOFT_RESET_TC | ++ SOFT_RESET_TA | ++ SOFT_RESET_VC | ++ SOFT_RESET_VGT); ++ ++ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset); ++ WREG32(GRBM_SOFT_RESET, grbm_reset); ++ (void)RREG32(GRBM_SOFT_RESET); ++ udelay(50); ++ WREG32(GRBM_SOFT_RESET, 0); ++ (void)RREG32(GRBM_SOFT_RESET); ++ ++ /* reset all the system blocks */ ++ srbm_reset = SRBM_SOFT_RESET_ALL_MASK; ++ ++ dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); ++ WREG32(SRBM_SOFT_RESET, srbm_reset); ++ (void)RREG32(SRBM_SOFT_RESET); ++ udelay(50); ++ WREG32(SRBM_SOFT_RESET, 0); ++ (void)RREG32(SRBM_SOFT_RESET); ++ /* Wait a little for things to settle down */ ++ udelay(50); ++ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", ++ RREG32(GRBM_STATUS)); ++ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", ++ RREG32(GRBM_STATUS_SE0)); ++ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", ++ RREG32(GRBM_STATUS_SE1)); ++ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", ++ RREG32(SRBM_STATUS)); ++ /* After reset we need to reinit the asic as GPU often endup in an ++ * incoherent state. 
++ */ ++ atom_asic_init(rdev->mode_info.atom_context); ++ evergreen_mc_resume(rdev, &save); ++ return 0; ++} ++ ++int evergreen_asic_reset(struct radeon_device *rdev) ++{ ++ return evergreen_gpu_soft_reset(rdev); ++} ++ ++/* Interrupts */ ++ ++u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) ++{ ++ switch (crtc) { ++ case 0: ++ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET); ++ case 1: ++ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET); ++ case 2: ++ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET); ++ case 3: ++ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET); ++ case 4: ++ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET); ++ case 5: ++ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET); ++ default: ++ return 0; ++ } ++} ++ ++void evergreen_disable_interrupt_state(struct radeon_device *rdev) ++{ ++ u32 tmp; ++ ++ WREG32(CP_INT_CNTL, 0); ++ WREG32(GRBM_INT_CNTL, 0); ++ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); ++ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); ++ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); ++ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); ++ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); ++ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); ++ ++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); ++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); ++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); ++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); ++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); ++ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); ++ ++ WREG32(DACA_AUTODETECT_INT_CONTROL, 0); ++ WREG32(DACB_AUTODETECT_INT_CONTROL, 0); ++ ++ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD1_INT_CONTROL, tmp); ++ tmp = 
RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD2_INT_CONTROL, tmp); ++ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD3_INT_CONTROL, tmp); ++ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD4_INT_CONTROL, tmp); ++ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD5_INT_CONTROL, tmp); ++ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; ++ WREG32(DC_HPD6_INT_CONTROL, tmp); ++ ++} ++ ++int evergreen_irq_set(struct radeon_device *rdev) ++{ ++ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; ++ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; ++ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; ++ u32 grbm_int_cntl = 0; ++ ++ if (!rdev->irq.installed) { ++ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); ++ return -EINVAL; ++ } ++ /* don't enable anything if the ih is disabled */ ++ if (!rdev->ih.enabled) { ++ r600_disable_interrupts(rdev); ++ /* force the active interrupt state to all disabled */ ++ evergreen_disable_interrupt_state(rdev); ++ return 0; ++ } ++ ++ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; ++ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; ++ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; ++ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; ++ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; ++ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; ++ ++ if (rdev->irq.sw_int) { ++ DRM_DEBUG("evergreen_irq_set: sw int\n"); ++ cp_int_cntl |= RB_INT_ENABLE; ++ } ++ if (rdev->irq.crtc_vblank_int[0]) { ++ DRM_DEBUG("evergreen_irq_set: vblank 0\n"); ++ crtc1 |= VBLANK_INT_MASK; ++ } ++ if (rdev->irq.crtc_vblank_int[1]) { ++ DRM_DEBUG("evergreen_irq_set: vblank 1\n"); ++ crtc2 |= VBLANK_INT_MASK; ++ } ++ if (rdev->irq.crtc_vblank_int[2]) { ++ DRM_DEBUG("evergreen_irq_set: vblank 2\n"); ++ crtc3 |= VBLANK_INT_MASK; ++ } ++ if (rdev->irq.crtc_vblank_int[3]) { ++ 
DRM_DEBUG("evergreen_irq_set: vblank 3\n"); ++ crtc4 |= VBLANK_INT_MASK; ++ } ++ if (rdev->irq.crtc_vblank_int[4]) { ++ DRM_DEBUG("evergreen_irq_set: vblank 4\n"); ++ crtc5 |= VBLANK_INT_MASK; ++ } ++ if (rdev->irq.crtc_vblank_int[5]) { ++ DRM_DEBUG("evergreen_irq_set: vblank 5\n"); ++ crtc6 |= VBLANK_INT_MASK; ++ } ++ if (rdev->irq.hpd[0]) { ++ DRM_DEBUG("evergreen_irq_set: hpd 1\n"); ++ hpd1 |= DC_HPDx_INT_EN; ++ } ++ if (rdev->irq.hpd[1]) { ++ DRM_DEBUG("evergreen_irq_set: hpd 2\n"); ++ hpd2 |= DC_HPDx_INT_EN; ++ } ++ if (rdev->irq.hpd[2]) { ++ DRM_DEBUG("evergreen_irq_set: hpd 3\n"); ++ hpd3 |= DC_HPDx_INT_EN; ++ } ++ if (rdev->irq.hpd[3]) { ++ DRM_DEBUG("evergreen_irq_set: hpd 4\n"); ++ hpd4 |= DC_HPDx_INT_EN; ++ } ++ if (rdev->irq.hpd[4]) { ++ DRM_DEBUG("evergreen_irq_set: hpd 5\n"); ++ hpd5 |= DC_HPDx_INT_EN; ++ } ++ if (rdev->irq.hpd[5]) { ++ DRM_DEBUG("evergreen_irq_set: hpd 6\n"); ++ hpd6 |= DC_HPDx_INT_EN; ++ } ++ if (rdev->irq.gui_idle) { ++ DRM_DEBUG("gui idle\n"); ++ grbm_int_cntl |= GUI_IDLE_INT_ENABLE; ++ } ++ ++ WREG32(CP_INT_CNTL, cp_int_cntl); ++ WREG32(GRBM_INT_CNTL, grbm_int_cntl); ++ ++ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); ++ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); ++ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); ++ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); ++ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); ++ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); ++ ++ WREG32(DC_HPD1_INT_CONTROL, hpd1); ++ WREG32(DC_HPD2_INT_CONTROL, hpd2); ++ WREG32(DC_HPD3_INT_CONTROL, hpd3); ++ WREG32(DC_HPD4_INT_CONTROL, hpd4); ++ WREG32(DC_HPD5_INT_CONTROL, hpd5); ++ WREG32(DC_HPD6_INT_CONTROL, hpd6); ++ + return 0; + } + ++static inline void evergreen_irq_ack(struct radeon_device *rdev, ++ u32 *disp_int, ++ u32 *disp_int_cont, ++ u32 *disp_int_cont2, ++ u32 *disp_int_cont3, ++ u32 *disp_int_cont4, ++ u32 *disp_int_cont5) ++{ ++ u32 tmp; ++ ++ *disp_int = 
RREG32(DISP_INTERRUPT_STATUS); ++ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); ++ *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); ++ *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); ++ *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); ++ *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); ++ ++ if (*disp_int & LB_D1_VBLANK_INTERRUPT) ++ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); ++ if (*disp_int & LB_D1_VLINE_INTERRUPT) ++ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); ++ ++ if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) ++ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); ++ if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) ++ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); ++ ++ if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) ++ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); ++ if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) ++ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); ++ ++ if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) ++ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); ++ if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) ++ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); ++ ++ if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) ++ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); ++ if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) ++ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); ++ ++ if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) ++ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); ++ if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) ++ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); ++ ++ if (*disp_int & DC_HPD1_INTERRUPT) { ++ tmp = RREG32(DC_HPD1_INT_CONTROL); ++ tmp |= DC_HPDx_INT_ACK; ++ WREG32(DC_HPD1_INT_CONTROL, tmp); ++ } ++ if (*disp_int_cont & DC_HPD2_INTERRUPT) { ++ tmp = 
RREG32(DC_HPD2_INT_CONTROL); ++ tmp |= DC_HPDx_INT_ACK; ++ WREG32(DC_HPD2_INT_CONTROL, tmp); ++ } ++ if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { ++ tmp = RREG32(DC_HPD3_INT_CONTROL); ++ tmp |= DC_HPDx_INT_ACK; ++ WREG32(DC_HPD3_INT_CONTROL, tmp); ++ } ++ if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { ++ tmp = RREG32(DC_HPD4_INT_CONTROL); ++ tmp |= DC_HPDx_INT_ACK; ++ WREG32(DC_HPD4_INT_CONTROL, tmp); ++ } ++ if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { ++ tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp |= DC_HPDx_INT_ACK; ++ WREG32(DC_HPD5_INT_CONTROL, tmp); ++ } ++ if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { ++ tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp |= DC_HPDx_INT_ACK; ++ WREG32(DC_HPD6_INT_CONTROL, tmp); ++ } ++} ++ ++void evergreen_irq_disable(struct radeon_device *rdev) ++{ ++ u32 disp_int, disp_int_cont, disp_int_cont2; ++ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; ++ ++ r600_disable_interrupts(rdev); ++ /* Wait and acknowledge irq */ ++ mdelay(1); ++ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, ++ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); ++ evergreen_disable_interrupt_state(rdev); ++} ++ ++static void evergreen_irq_suspend(struct radeon_device *rdev) ++{ ++ evergreen_irq_disable(rdev); ++ r600_rlc_stop(rdev); ++} ++ ++static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) ++{ ++ u32 wptr, tmp; ++ ++ /* XXX use writeback */ ++ wptr = RREG32(IH_RB_WPTR); ++ ++ if (wptr & RB_OVERFLOW) { ++ /* When a ring buffer overflow happen start parsing interrupt ++ * from the last not overwritten vector (wptr + 16). Hopefully ++ * this should allow us to catchup. 
++ */ ++ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", ++ wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); ++ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; ++ tmp = RREG32(IH_RB_CNTL); ++ tmp |= IH_WPTR_OVERFLOW_CLEAR; ++ WREG32(IH_RB_CNTL, tmp); ++ } ++ return (wptr & rdev->ih.ptr_mask); ++} ++ ++int evergreen_irq_process(struct radeon_device *rdev) ++{ ++ u32 wptr = evergreen_get_ih_wptr(rdev); ++ u32 rptr = rdev->ih.rptr; ++ u32 src_id, src_data; ++ u32 ring_index; ++ u32 disp_int, disp_int_cont, disp_int_cont2; ++ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; ++ unsigned long flags; ++ bool queue_hotplug = false; ++ ++ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); ++ if (!rdev->ih.enabled) ++ return IRQ_NONE; ++ ++ spin_lock_irqsave(&rdev->ih.lock, flags); ++ ++ if (rptr == wptr) { ++ spin_unlock_irqrestore(&rdev->ih.lock, flags); ++ return IRQ_NONE; ++ } ++ if (rdev->shutdown) { ++ spin_unlock_irqrestore(&rdev->ih.lock, flags); ++ return IRQ_NONE; ++ } ++ ++restart_ih: ++ /* display interrupts */ ++ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, ++ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); ++ ++ rdev->ih.wptr = wptr; ++ while (rptr != wptr) { ++ /* wptr/rptr are in bytes! 
*/ ++ ring_index = rptr / 4; ++ src_id = rdev->ih.ring[ring_index] & 0xff; ++ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; ++ ++ switch (src_id) { ++ case 1: /* D1 vblank/vline */ ++ switch (src_data) { ++ case 0: /* D1 vblank */ ++ if (disp_int & LB_D1_VBLANK_INTERRUPT) { ++ drm_handle_vblank(rdev->ddev, 0); ++ wake_up(&rdev->irq.vblank_queue); ++ disp_int &= ~LB_D1_VBLANK_INTERRUPT; ++ DRM_DEBUG("IH: D1 vblank\n"); ++ } ++ break; ++ case 1: /* D1 vline */ ++ if (disp_int & LB_D1_VLINE_INTERRUPT) { ++ disp_int &= ~LB_D1_VLINE_INTERRUPT; ++ DRM_DEBUG("IH: D1 vline\n"); ++ } ++ break; ++ default: ++ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); ++ break; ++ } ++ break; ++ case 2: /* D2 vblank/vline */ ++ switch (src_data) { ++ case 0: /* D2 vblank */ ++ if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { ++ drm_handle_vblank(rdev->ddev, 1); ++ wake_up(&rdev->irq.vblank_queue); ++ disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; ++ DRM_DEBUG("IH: D2 vblank\n"); ++ } ++ break; ++ case 1: /* D2 vline */ ++ if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { ++ disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; ++ DRM_DEBUG("IH: D2 vline\n"); ++ } ++ break; ++ default: ++ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); ++ break; ++ } ++ break; ++ case 3: /* D3 vblank/vline */ ++ switch (src_data) { ++ case 0: /* D3 vblank */ ++ if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { ++ drm_handle_vblank(rdev->ddev, 2); ++ wake_up(&rdev->irq.vblank_queue); ++ disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; ++ DRM_DEBUG("IH: D3 vblank\n"); ++ } ++ break; ++ case 1: /* D3 vline */ ++ if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { ++ disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; ++ DRM_DEBUG("IH: D3 vline\n"); ++ } ++ break; ++ default: ++ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); ++ break; ++ } ++ break; ++ case 4: /* D4 vblank/vline */ ++ switch (src_data) { ++ case 0: /* D4 vblank */ ++ if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { ++ 
drm_handle_vblank(rdev->ddev, 3); ++ wake_up(&rdev->irq.vblank_queue); ++ disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; ++ DRM_DEBUG("IH: D4 vblank\n"); ++ } ++ break; ++ case 1: /* D4 vline */ ++ if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { ++ disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; ++ DRM_DEBUG("IH: D4 vline\n"); ++ } ++ break; ++ default: ++ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); ++ break; ++ } ++ break; ++ case 5: /* D5 vblank/vline */ ++ switch (src_data) { ++ case 0: /* D5 vblank */ ++ if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { ++ drm_handle_vblank(rdev->ddev, 4); ++ wake_up(&rdev->irq.vblank_queue); ++ disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; ++ DRM_DEBUG("IH: D5 vblank\n"); ++ } ++ break; ++ case 1: /* D5 vline */ ++ if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { ++ disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; ++ DRM_DEBUG("IH: D5 vline\n"); ++ } ++ break; ++ default: ++ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); ++ break; ++ } ++ break; ++ case 6: /* D6 vblank/vline */ ++ switch (src_data) { ++ case 0: /* D6 vblank */ ++ if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { ++ drm_handle_vblank(rdev->ddev, 5); ++ wake_up(&rdev->irq.vblank_queue); ++ disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; ++ DRM_DEBUG("IH: D6 vblank\n"); ++ } ++ break; ++ case 1: /* D6 vline */ ++ if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { ++ disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; ++ DRM_DEBUG("IH: D6 vline\n"); ++ } ++ break; ++ default: ++ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); ++ break; ++ } ++ break; ++ case 42: /* HPD hotplug */ ++ switch (src_data) { ++ case 0: ++ if (disp_int & DC_HPD1_INTERRUPT) { ++ disp_int &= ~DC_HPD1_INTERRUPT; ++ queue_hotplug = true; ++ DRM_DEBUG("IH: HPD1\n"); ++ } ++ break; ++ case 1: ++ if (disp_int_cont & DC_HPD2_INTERRUPT) { ++ disp_int_cont &= ~DC_HPD2_INTERRUPT; ++ queue_hotplug = true; ++ DRM_DEBUG("IH: HPD2\n"); ++ } ++ break; ++ case 2: ++ if (disp_int_cont2 & DC_HPD3_INTERRUPT) { ++ 
disp_int_cont2 &= ~DC_HPD3_INTERRUPT; ++ queue_hotplug = true; ++ DRM_DEBUG("IH: HPD3\n"); ++ } ++ break; ++ case 3: ++ if (disp_int_cont3 & DC_HPD4_INTERRUPT) { ++ disp_int_cont3 &= ~DC_HPD4_INTERRUPT; ++ queue_hotplug = true; ++ DRM_DEBUG("IH: HPD4\n"); ++ } ++ break; ++ case 4: ++ if (disp_int_cont4 & DC_HPD5_INTERRUPT) { ++ disp_int_cont4 &= ~DC_HPD5_INTERRUPT; ++ queue_hotplug = true; ++ DRM_DEBUG("IH: HPD5\n"); ++ } ++ break; ++ case 5: ++ if (disp_int_cont5 & DC_HPD6_INTERRUPT) { ++ disp_int_cont5 &= ~DC_HPD6_INTERRUPT; ++ queue_hotplug = true; ++ DRM_DEBUG("IH: HPD6\n"); ++ } ++ break; ++ default: ++ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); ++ break; ++ } ++ break; ++ case 176: /* CP_INT in ring buffer */ ++ case 177: /* CP_INT in IB1 */ ++ case 178: /* CP_INT in IB2 */ ++ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); ++ radeon_fence_process(rdev); ++ break; ++ case 181: /* CP EOP event */ ++ DRM_DEBUG("IH: CP EOP\n"); ++ break; ++ case 233: /* GUI IDLE */ ++ DRM_DEBUG("IH: CP EOP\n"); ++ rdev->pm.gui_idle = true; ++ wake_up(&rdev->irq.idle_queue); ++ break; ++ default: ++ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); ++ break; ++ } ++ ++ /* wptr/rptr are in bytes! 
*/ ++ rptr += 16; ++ rptr &= rdev->ih.ptr_mask; ++ } ++ /* make sure wptr hasn't changed while processing */ ++ wptr = evergreen_get_ih_wptr(rdev); ++ if (wptr != rdev->ih.wptr) ++ goto restart_ih; ++ if (queue_hotplug) ++ queue_work(rdev->wq, &rdev->hotplug_work); ++ rdev->ih.rptr = rptr; ++ WREG32(IH_RB_RPTR, rdev->ih.rptr); ++ spin_unlock_irqrestore(&rdev->ih.lock, flags); ++ return IRQ_HANDLED; ++} ++ + static int evergreen_startup(struct radeon_device *rdev) + { +-#if 0 + int r; + + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { +@@ -505,17 +1948,15 @@ static int evergreen_startup(struct radeon_device *rdev) + return r; + } + } +-#endif ++ + evergreen_mc_program(rdev); +-#if 0 + if (rdev->flags & RADEON_IS_AGP) { +- evergreem_agp_enable(rdev); ++ evergreen_agp_enable(rdev); + } else { + r = evergreen_pcie_gart_enable(rdev); + if (r) + return r; + } +-#endif + evergreen_gpu_init(rdev); + #if 0 + if (!rdev->r600_blit.shader_obj) { +@@ -536,6 +1977,7 @@ static int evergreen_startup(struct radeon_device *rdev) + DRM_ERROR("failed to pin blit object %d\n", r); + return r; + } ++#endif + + /* Enable IRQ */ + r = r600_irq_init(rdev); +@@ -544,7 +1986,7 @@ static int evergreen_startup(struct radeon_device *rdev) + radeon_irq_kms_fini(rdev); + return r; + } +- r600_irq_set(rdev); ++ evergreen_irq_set(rdev); + + r = radeon_ring_init(rdev, rdev->cp.ring_size); + if (r) +@@ -552,12 +1994,12 @@ static int evergreen_startup(struct radeon_device *rdev) + r = evergreen_cp_load_microcode(rdev); + if (r) + return r; +- r = r600_cp_resume(rdev); ++ r = evergreen_cp_resume(rdev); + if (r) + return r; + /* write back buffer are not vital so don't worry about failure */ + r600_wb_enable(rdev); +-#endif ++ + return 0; + } + +@@ -582,13 +2024,13 @@ int evergreen_resume(struct radeon_device *rdev) + DRM_ERROR("r600 startup failed on resume\n"); + return r; + } +-#if 0 ++ + r = r600_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failled testing IB (%d).\n", r); + return r; + } 
+-#endif ++ + return r; + + } +@@ -597,12 +2039,14 @@ int evergreen_suspend(struct radeon_device *rdev) + { + #if 0 + int r; +- ++#endif + /* FIXME: we should wait for ring to be empty */ + r700_cp_stop(rdev); + rdev->cp.ready = false; ++ evergreen_irq_suspend(rdev); + r600_wb_disable(rdev); + evergreen_pcie_gart_disable(rdev); ++#if 0 + /* unpin shaders bo */ + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (likely(r == 0)) { +@@ -682,8 +2126,6 @@ int evergreen_init(struct radeon_device *rdev) + r = radeon_clocks_init(rdev); + if (r) + return r; +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) +@@ -702,7 +2144,7 @@ int evergreen_init(struct radeon_device *rdev) + r = radeon_bo_init(rdev); + if (r) + return r; +-#if 0 ++ + r = radeon_irq_kms_init(rdev); + if (r) + return r; +@@ -716,14 +2158,16 @@ int evergreen_init(struct radeon_device *rdev) + r = r600_pcie_gart_init(rdev); + if (r) + return r; +-#endif +- rdev->accel_working = false; ++ ++ rdev->accel_working = true; + r = evergreen_startup(rdev); + if (r) { +- evergreen_suspend(rdev); +- /*r600_wb_fini(rdev);*/ +- /*radeon_ring_fini(rdev);*/ +- /*evergreen_pcie_gart_fini(rdev);*/ ++ dev_err(rdev->dev, "disabling GPU acceleration\n"); ++ r700_cp_fini(rdev); ++ r600_wb_fini(rdev); ++ r600_irq_fini(rdev); ++ radeon_irq_kms_fini(rdev); ++ evergreen_pcie_gart_fini(rdev); + rdev->accel_working = false; + } + if (rdev->accel_working) { +@@ -743,16 +2187,12 @@ int evergreen_init(struct radeon_device *rdev) + + void evergreen_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); +- evergreen_suspend(rdev); +-#if 0 +- r600_blit_fini(rdev); ++ /*r600_blit_fini(rdev);*/ ++ r700_cp_fini(rdev); ++ r600_wb_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); +- radeon_ring_fini(rdev); +- r600_wb_fini(rdev); + evergreen_pcie_gart_fini(rdev); +-#endif + radeon_gem_fini(rdev); + radeon_fence_driver_fini(rdev); + 
radeon_clocks_fini(rdev); +diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c +new file mode 100644 +index 0000000..64516b9 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/evergreen_cs.c +@@ -0,0 +1,1356 @@ ++/* ++ * Copyright 2010 Advanced Micro Devices, Inc. ++ * Copyright 2008 Red Hat Inc. ++ * Copyright 2009 Jerome Glisse. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Alex Deucher ++ * Jerome Glisse ++ */ ++#include "drmP.h" ++#include "radeon.h" ++#include "evergreend.h" ++#include "evergreen_reg_safe.h" ++ ++static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, ++ struct radeon_cs_reloc **cs_reloc); ++ ++struct evergreen_cs_track { ++ u32 group_size; ++ u32 nbanks; ++ u32 npipes; ++ /* value we track */ ++ u32 nsamples; ++ u32 cb_color_base_last[12]; ++ struct radeon_bo *cb_color_bo[12]; ++ u32 cb_color_bo_offset[12]; ++ struct radeon_bo *cb_color_fmask_bo[8]; ++ struct radeon_bo *cb_color_cmask_bo[8]; ++ u32 cb_color_info[12]; ++ u32 cb_color_view[12]; ++ u32 cb_color_pitch_idx[12]; ++ u32 cb_color_slice_idx[12]; ++ u32 cb_color_dim_idx[12]; ++ u32 cb_color_dim[12]; ++ u32 cb_color_pitch[12]; ++ u32 cb_color_slice[12]; ++ u32 cb_color_cmask_slice[8]; ++ u32 cb_color_fmask_slice[8]; ++ u32 cb_target_mask; ++ u32 cb_shader_mask; ++ u32 vgt_strmout_config; ++ u32 vgt_strmout_buffer_config; ++ u32 db_depth_control; ++ u32 db_depth_view; ++ u32 db_depth_size; ++ u32 db_depth_size_idx; ++ u32 db_z_info; ++ u32 db_z_idx; ++ u32 db_z_read_offset; ++ u32 db_z_write_offset; ++ struct radeon_bo *db_z_read_bo; ++ struct radeon_bo *db_z_write_bo; ++ u32 db_s_info; ++ u32 db_s_idx; ++ u32 db_s_read_offset; ++ u32 db_s_write_offset; ++ struct radeon_bo *db_s_read_bo; ++ struct radeon_bo *db_s_write_bo; ++}; ++ ++static void evergreen_cs_track_init(struct evergreen_cs_track *track) ++{ ++ int i; ++ ++ for (i = 0; i < 8; i++) { ++ track->cb_color_fmask_bo[i] = NULL; ++ track->cb_color_cmask_bo[i] = NULL; ++ track->cb_color_cmask_slice[i] = 0; ++ track->cb_color_fmask_slice[i] = 0; ++ } ++ ++ for (i = 0; i < 12; i++) { ++ track->cb_color_base_last[i] = 0; ++ track->cb_color_bo[i] = NULL; ++ track->cb_color_bo_offset[i] = 0xFFFFFFFF; ++ track->cb_color_info[i] = 0; ++ track->cb_color_view[i] = 0; ++ track->cb_color_pitch_idx[i] = 0; ++ track->cb_color_slice_idx[i] = 0; ++ 
track->cb_color_dim[i] = 0; ++ track->cb_color_pitch[i] = 0; ++ track->cb_color_slice[i] = 0; ++ track->cb_color_dim[i] = 0; ++ } ++ track->cb_target_mask = 0xFFFFFFFF; ++ track->cb_shader_mask = 0xFFFFFFFF; ++ ++ track->db_depth_view = 0xFFFFC000; ++ track->db_depth_size = 0xFFFFFFFF; ++ track->db_depth_size_idx = 0; ++ track->db_depth_control = 0xFFFFFFFF; ++ track->db_z_info = 0xFFFFFFFF; ++ track->db_z_idx = 0xFFFFFFFF; ++ track->db_z_read_offset = 0xFFFFFFFF; ++ track->db_z_write_offset = 0xFFFFFFFF; ++ track->db_z_read_bo = NULL; ++ track->db_z_write_bo = NULL; ++ track->db_s_info = 0xFFFFFFFF; ++ track->db_s_idx = 0xFFFFFFFF; ++ track->db_s_read_offset = 0xFFFFFFFF; ++ track->db_s_write_offset = 0xFFFFFFFF; ++ track->db_s_read_bo = NULL; ++ track->db_s_write_bo = NULL; ++} ++ ++static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i) ++{ ++ /* XXX fill in */ ++ return 0; ++} ++ ++static int evergreen_cs_track_check(struct radeon_cs_parser *p) ++{ ++ struct evergreen_cs_track *track = p->track; ++ ++ /* we don't support stream out buffer yet */ ++ if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) { ++ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); ++ return -EINVAL; ++ } ++ ++ /* XXX fill in */ ++ return 0; ++} ++ ++/** ++ * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet ++ * @parser: parser structure holding parsing context. ++ * @pkt: where to store packet informations ++ * ++ * Assume that chunk_ib_index is properly set. Will return -EINVAL ++ * if packet is bigger than remaining ib size. or if packets is unknown. 
++ **/ ++int evergreen_cs_packet_parse(struct radeon_cs_parser *p, ++ struct radeon_cs_packet *pkt, ++ unsigned idx) ++{ ++ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; ++ uint32_t header; ++ ++ if (idx >= ib_chunk->length_dw) { ++ DRM_ERROR("Can not parse packet at %d after CS end %d !\n", ++ idx, ib_chunk->length_dw); ++ return -EINVAL; ++ } ++ header = radeon_get_ib_value(p, idx); ++ pkt->idx = idx; ++ pkt->type = CP_PACKET_GET_TYPE(header); ++ pkt->count = CP_PACKET_GET_COUNT(header); ++ pkt->one_reg_wr = 0; ++ switch (pkt->type) { ++ case PACKET_TYPE0: ++ pkt->reg = CP_PACKET0_GET_REG(header); ++ break; ++ case PACKET_TYPE3: ++ pkt->opcode = CP_PACKET3_GET_OPCODE(header); ++ break; ++ case PACKET_TYPE2: ++ pkt->count = -1; ++ break; ++ default: ++ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); ++ return -EINVAL; ++ } ++ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { ++ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", ++ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++/** ++ * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3 ++ * @parser: parser structure holding parsing context. ++ * @data: pointer to relocation data ++ * @offset_start: starting offset ++ * @offset_mask: offset mask (to align start offset on) ++ * @reloc: reloc informations ++ * ++ * Check next packet is relocation packet3, do bo validation and compute ++ * GPU offset using the provided start. 
++ **/ ++static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, ++ struct radeon_cs_reloc **cs_reloc) ++{ ++ struct radeon_cs_chunk *relocs_chunk; ++ struct radeon_cs_packet p3reloc; ++ unsigned idx; ++ int r; ++ ++ if (p->chunk_relocs_idx == -1) { ++ DRM_ERROR("No relocation chunk !\n"); ++ return -EINVAL; ++ } ++ *cs_reloc = NULL; ++ relocs_chunk = &p->chunks[p->chunk_relocs_idx]; ++ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); ++ if (r) { ++ return r; ++ } ++ p->idx += p3reloc.count + 2; ++ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { ++ DRM_ERROR("No packet3 for relocation for packet at %d.\n", ++ p3reloc.idx); ++ return -EINVAL; ++ } ++ idx = radeon_get_ib_value(p, p3reloc.idx + 1); ++ if (idx >= relocs_chunk->length_dw) { ++ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", ++ idx, relocs_chunk->length_dw); ++ return -EINVAL; ++ } ++ /* FIXME: we assume reloc size is 4 dwords */ ++ *cs_reloc = p->relocs_ptr[(idx / 4)]; ++ return 0; ++} ++ ++/** ++ * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc ++ * @parser: parser structure holding parsing context. ++ * ++ * Check next packet is relocation packet3, do bo validation and compute ++ * GPU offset using the provided start. ++ **/ ++static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) ++{ ++ struct radeon_cs_packet p3reloc; ++ int r; ++ ++ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); ++ if (r) { ++ return 0; ++ } ++ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { ++ return 0; ++ } ++ return 1; ++} ++ ++/** ++ * evergreen_cs_packet_next_vline() - parse userspace VLINE packet ++ * @parser: parser structure holding parsing context. ++ * ++ * Userspace sends a special sequence for VLINE waits. ++ * PACKET0 - VLINE_START_END + value ++ * PACKET3 - WAIT_REG_MEM poll vline status reg ++ * RELOC (P3) - crtc_id in reloc. 
++ * ++ * This function parses this and relocates the VLINE START END ++ * and WAIT_REG_MEM packets to the correct crtc. ++ * It also detects a switched off crtc and nulls out the ++ * wait in that case. ++ */ ++static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) ++{ ++ struct drm_mode_object *obj; ++ struct drm_crtc *crtc; ++ struct radeon_crtc *radeon_crtc; ++ struct radeon_cs_packet p3reloc, wait_reg_mem; ++ int crtc_id; ++ int r; ++ uint32_t header, h_idx, reg, wait_reg_mem_info; ++ volatile uint32_t *ib; ++ ++ ib = p->ib->ptr; ++ ++ /* parse the WAIT_REG_MEM */ ++ r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx); ++ if (r) ++ return r; ++ ++ /* check its a WAIT_REG_MEM */ ++ if (wait_reg_mem.type != PACKET_TYPE3 || ++ wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { ++ DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); ++ r = -EINVAL; ++ return r; ++ } ++ ++ wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); ++ /* bit 4 is reg (0) or mem (1) */ ++ if (wait_reg_mem_info & 0x10) { ++ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); ++ r = -EINVAL; ++ return r; ++ } ++ /* waiting for value to be equal */ ++ if ((wait_reg_mem_info & 0x7) != 0x3) { ++ DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); ++ r = -EINVAL; ++ return r; ++ } ++ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { ++ DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); ++ r = -EINVAL; ++ return r; ++ } ++ ++ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { ++ DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); ++ r = -EINVAL; ++ return r; ++ } ++ ++ /* jump over the NOP */ ++ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); ++ if (r) ++ return r; ++ ++ h_idx = p->idx - 2; ++ p->idx += wait_reg_mem.count + 2; ++ p->idx += p3reloc.count + 2; ++ ++ header = radeon_get_ib_value(p, h_idx); ++ crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); ++ reg = 
CP_PACKET0_GET_REG(header); ++ mutex_lock(&p->rdev->ddev->mode_config.mutex); ++ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); ++ if (!obj) { ++ DRM_ERROR("cannot find crtc %d\n", crtc_id); ++ r = -EINVAL; ++ goto out; ++ } ++ crtc = obj_to_crtc(obj); ++ radeon_crtc = to_radeon_crtc(crtc); ++ crtc_id = radeon_crtc->crtc_id; ++ ++ if (!crtc->enabled) { ++ /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ ++ ib[h_idx + 2] = PACKET2(0); ++ ib[h_idx + 3] = PACKET2(0); ++ ib[h_idx + 4] = PACKET2(0); ++ ib[h_idx + 5] = PACKET2(0); ++ ib[h_idx + 6] = PACKET2(0); ++ ib[h_idx + 7] = PACKET2(0); ++ ib[h_idx + 8] = PACKET2(0); ++ } else { ++ switch (reg) { ++ case EVERGREEN_VLINE_START_END: ++ header &= ~R600_CP_PACKET0_REG_MASK; ++ header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2; ++ ib[h_idx] = header; ++ ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2; ++ break; ++ default: ++ DRM_ERROR("unknown crtc reloc\n"); ++ r = -EINVAL; ++ goto out; ++ } ++ } ++out: ++ mutex_unlock(&p->rdev->ddev->mode_config.mutex); ++ return r; ++} ++ ++static int evergreen_packet0_check(struct radeon_cs_parser *p, ++ struct radeon_cs_packet *pkt, ++ unsigned idx, unsigned reg) ++{ ++ int r; ++ ++ switch (reg) { ++ case EVERGREEN_VLINE_START_END: ++ r = evergreen_cs_packet_parse_vline(p); ++ if (r) { ++ DRM_ERROR("No reloc for ib[%d]=0x%04X\n", ++ idx, reg); ++ return r; ++ } ++ break; ++ default: ++ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", ++ reg, idx); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, ++ struct radeon_cs_packet *pkt) ++{ ++ unsigned reg, i; ++ unsigned idx; ++ int r; ++ ++ idx = pkt->idx + 1; ++ reg = pkt->reg; ++ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { ++ r = evergreen_packet0_check(p, pkt, idx, reg); ++ if (r) { ++ return r; ++ } ++ } ++ return 0; ++} ++ ++/** ++ * evergreen_cs_check_reg() - 
check if register is authorized or not ++ * @parser: parser structure holding parsing context ++ * @reg: register we are testing ++ * @idx: index into the cs buffer ++ * ++ * This function will test against evergreen_reg_safe_bm and return 0 ++ * if register is safe. If register is not flag as safe this function ++ * will test it against a list of register needind special handling. ++ */ ++static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) ++{ ++ struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; ++ struct radeon_cs_reloc *reloc; ++ u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); ++ u32 m, i, tmp, *ib; ++ int r; ++ ++ i = (reg >> 7); ++ if (i > last_reg) { ++ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); ++ return -EINVAL; ++ } ++ m = 1 << ((reg >> 2) & 31); ++ if (!(evergreen_reg_safe_bm[i] & m)) ++ return 0; ++ ib = p->ib->ptr; ++ switch (reg) { ++ /* force following reg to 0 in an attemp to disable out buffer ++ * which will need us to better understand how it works to perform ++ * security check on it (Jerome) ++ */ ++ case SQ_ESGS_RING_SIZE: ++ case SQ_GSVS_RING_SIZE: ++ case SQ_ESTMP_RING_SIZE: ++ case SQ_GSTMP_RING_SIZE: ++ case SQ_HSTMP_RING_SIZE: ++ case SQ_LSTMP_RING_SIZE: ++ case SQ_PSTMP_RING_SIZE: ++ case SQ_VSTMP_RING_SIZE: ++ case SQ_ESGS_RING_ITEMSIZE: ++ case SQ_ESTMP_RING_ITEMSIZE: ++ case SQ_GSTMP_RING_ITEMSIZE: ++ case SQ_GSVS_RING_ITEMSIZE: ++ case SQ_GS_VERT_ITEMSIZE: ++ case SQ_GS_VERT_ITEMSIZE_1: ++ case SQ_GS_VERT_ITEMSIZE_2: ++ case SQ_GS_VERT_ITEMSIZE_3: ++ case SQ_GSVS_RING_OFFSET_1: ++ case SQ_GSVS_RING_OFFSET_2: ++ case SQ_GSVS_RING_OFFSET_3: ++ case SQ_HSTMP_RING_ITEMSIZE: ++ case SQ_LSTMP_RING_ITEMSIZE: ++ case SQ_PSTMP_RING_ITEMSIZE: ++ case SQ_VSTMP_RING_ITEMSIZE: ++ case VGT_TF_RING_SIZE: ++ /* get value to populate the IB don't remove */ ++ tmp =radeon_get_ib_value(p, idx); ++ ib[idx] = 0; ++ break; ++ case DB_DEPTH_CONTROL: ++ track->db_depth_control 
= radeon_get_ib_value(p, idx); ++ break; ++ case DB_Z_INFO: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ track->db_z_info = radeon_get_ib_value(p, idx); ++ ib[idx] &= ~Z_ARRAY_MODE(0xf); ++ track->db_z_info &= ~Z_ARRAY_MODE(0xf); ++ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { ++ ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); ++ track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); ++ } else { ++ ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); ++ track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); ++ } ++ break; ++ case DB_STENCIL_INFO: ++ track->db_s_info = radeon_get_ib_value(p, idx); ++ break; ++ case DB_DEPTH_VIEW: ++ track->db_depth_view = radeon_get_ib_value(p, idx); ++ break; ++ case DB_DEPTH_SIZE: ++ track->db_depth_size = radeon_get_ib_value(p, idx); ++ track->db_depth_size_idx = idx; ++ break; ++ case DB_Z_READ_BASE: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ track->db_z_read_offset = radeon_get_ib_value(p, idx); ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ track->db_z_read_bo = reloc->robj; ++ break; ++ case DB_Z_WRITE_BASE: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ track->db_z_write_offset = radeon_get_ib_value(p, idx); ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ track->db_z_write_bo = reloc->robj; ++ break; ++ case DB_STENCIL_READ_BASE: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ track->db_s_read_offset = radeon_get_ib_value(p, idx); ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ track->db_s_read_bo = reloc->robj; ++ break; ++ case 
DB_STENCIL_WRITE_BASE: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ track->db_s_write_offset = radeon_get_ib_value(p, idx); ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ track->db_s_write_bo = reloc->robj; ++ break; ++ case VGT_STRMOUT_CONFIG: ++ track->vgt_strmout_config = radeon_get_ib_value(p, idx); ++ break; ++ case VGT_STRMOUT_BUFFER_CONFIG: ++ track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); ++ break; ++ case CB_TARGET_MASK: ++ track->cb_target_mask = radeon_get_ib_value(p, idx); ++ break; ++ case CB_SHADER_MASK: ++ track->cb_shader_mask = radeon_get_ib_value(p, idx); ++ break; ++ case PA_SC_AA_CONFIG: ++ tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; ++ track->nsamples = 1 << tmp; ++ break; ++ case CB_COLOR0_VIEW: ++ case CB_COLOR1_VIEW: ++ case CB_COLOR2_VIEW: ++ case CB_COLOR3_VIEW: ++ case CB_COLOR4_VIEW: ++ case CB_COLOR5_VIEW: ++ case CB_COLOR6_VIEW: ++ case CB_COLOR7_VIEW: ++ tmp = (reg - CB_COLOR0_VIEW) / 0x3c; ++ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); ++ break; ++ case CB_COLOR8_VIEW: ++ case CB_COLOR9_VIEW: ++ case CB_COLOR10_VIEW: ++ case CB_COLOR11_VIEW: ++ tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; ++ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); ++ break; ++ case CB_COLOR0_INFO: ++ case CB_COLOR1_INFO: ++ case CB_COLOR2_INFO: ++ case CB_COLOR3_INFO: ++ case CB_COLOR4_INFO: ++ case CB_COLOR5_INFO: ++ case CB_COLOR6_INFO: ++ case CB_COLOR7_INFO: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ tmp = (reg - CB_COLOR0_INFO) / 0x3c; ++ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); ++ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { ++ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); ++ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 
++ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { ++ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); ++ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); ++ } ++ break; ++ case CB_COLOR8_INFO: ++ case CB_COLOR9_INFO: ++ case CB_COLOR10_INFO: ++ case CB_COLOR11_INFO: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; ++ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); ++ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { ++ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); ++ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); ++ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { ++ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); ++ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); ++ } ++ break; ++ case CB_COLOR0_PITCH: ++ case CB_COLOR1_PITCH: ++ case CB_COLOR2_PITCH: ++ case CB_COLOR3_PITCH: ++ case CB_COLOR4_PITCH: ++ case CB_COLOR5_PITCH: ++ case CB_COLOR6_PITCH: ++ case CB_COLOR7_PITCH: ++ tmp = (reg - CB_COLOR0_PITCH) / 0x3c; ++ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); ++ track->cb_color_pitch_idx[tmp] = idx; ++ break; ++ case CB_COLOR8_PITCH: ++ case CB_COLOR9_PITCH: ++ case CB_COLOR10_PITCH: ++ case CB_COLOR11_PITCH: ++ tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; ++ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); ++ track->cb_color_pitch_idx[tmp] = idx; ++ break; ++ case CB_COLOR0_SLICE: ++ case CB_COLOR1_SLICE: ++ case CB_COLOR2_SLICE: ++ case CB_COLOR3_SLICE: ++ case CB_COLOR4_SLICE: ++ case CB_COLOR5_SLICE: ++ case CB_COLOR6_SLICE: ++ case CB_COLOR7_SLICE: ++ tmp = (reg - CB_COLOR0_SLICE) / 0x3c; ++ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); ++ track->cb_color_slice_idx[tmp] = idx; ++ break; ++ case CB_COLOR8_SLICE: ++ case CB_COLOR9_SLICE: ++ case CB_COLOR10_SLICE: ++ case CB_COLOR11_SLICE: 
++ tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; ++ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); ++ track->cb_color_slice_idx[tmp] = idx; ++ break; ++ case CB_COLOR0_ATTRIB: ++ case CB_COLOR1_ATTRIB: ++ case CB_COLOR2_ATTRIB: ++ case CB_COLOR3_ATTRIB: ++ case CB_COLOR4_ATTRIB: ++ case CB_COLOR5_ATTRIB: ++ case CB_COLOR6_ATTRIB: ++ case CB_COLOR7_ATTRIB: ++ case CB_COLOR8_ATTRIB: ++ case CB_COLOR9_ATTRIB: ++ case CB_COLOR10_ATTRIB: ++ case CB_COLOR11_ATTRIB: ++ break; ++ case CB_COLOR0_DIM: ++ case CB_COLOR1_DIM: ++ case CB_COLOR2_DIM: ++ case CB_COLOR3_DIM: ++ case CB_COLOR4_DIM: ++ case CB_COLOR5_DIM: ++ case CB_COLOR6_DIM: ++ case CB_COLOR7_DIM: ++ tmp = (reg - CB_COLOR0_DIM) / 0x3c; ++ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); ++ track->cb_color_dim_idx[tmp] = idx; ++ break; ++ case CB_COLOR8_DIM: ++ case CB_COLOR9_DIM: ++ case CB_COLOR10_DIM: ++ case CB_COLOR11_DIM: ++ tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8; ++ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); ++ track->cb_color_dim_idx[tmp] = idx; ++ break; ++ case CB_COLOR0_FMASK: ++ case CB_COLOR1_FMASK: ++ case CB_COLOR2_FMASK: ++ case CB_COLOR3_FMASK: ++ case CB_COLOR4_FMASK: ++ case CB_COLOR5_FMASK: ++ case CB_COLOR6_FMASK: ++ case CB_COLOR7_FMASK: ++ tmp = (reg - CB_COLOR0_FMASK) / 0x3c; ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); ++ return -EINVAL; ++ } ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ track->cb_color_fmask_bo[tmp] = reloc->robj; ++ break; ++ case CB_COLOR0_CMASK: ++ case CB_COLOR1_CMASK: ++ case CB_COLOR2_CMASK: ++ case CB_COLOR3_CMASK: ++ case CB_COLOR4_CMASK: ++ case CB_COLOR5_CMASK: ++ case CB_COLOR6_CMASK: ++ case CB_COLOR7_CMASK: ++ tmp = (reg - CB_COLOR0_CMASK) / 0x3c; ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); ++ return -EINVAL; ++ } ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 
8) & 0xffffffff); ++ track->cb_color_cmask_bo[tmp] = reloc->robj; ++ break; ++ case CB_COLOR0_FMASK_SLICE: ++ case CB_COLOR1_FMASK_SLICE: ++ case CB_COLOR2_FMASK_SLICE: ++ case CB_COLOR3_FMASK_SLICE: ++ case CB_COLOR4_FMASK_SLICE: ++ case CB_COLOR5_FMASK_SLICE: ++ case CB_COLOR6_FMASK_SLICE: ++ case CB_COLOR7_FMASK_SLICE: ++ tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c; ++ track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx); ++ break; ++ case CB_COLOR0_CMASK_SLICE: ++ case CB_COLOR1_CMASK_SLICE: ++ case CB_COLOR2_CMASK_SLICE: ++ case CB_COLOR3_CMASK_SLICE: ++ case CB_COLOR4_CMASK_SLICE: ++ case CB_COLOR5_CMASK_SLICE: ++ case CB_COLOR6_CMASK_SLICE: ++ case CB_COLOR7_CMASK_SLICE: ++ tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c; ++ track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx); ++ break; ++ case CB_COLOR0_BASE: ++ case CB_COLOR1_BASE: ++ case CB_COLOR2_BASE: ++ case CB_COLOR3_BASE: ++ case CB_COLOR4_BASE: ++ case CB_COLOR5_BASE: ++ case CB_COLOR6_BASE: ++ case CB_COLOR7_BASE: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ tmp = (reg - CB_COLOR0_BASE) / 0x3c; ++ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ track->cb_color_base_last[tmp] = ib[idx]; ++ track->cb_color_bo[tmp] = reloc->robj; ++ break; ++ case CB_COLOR8_BASE: ++ case CB_COLOR9_BASE: ++ case CB_COLOR10_BASE: ++ case CB_COLOR11_BASE: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; ++ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ track->cb_color_base_last[tmp] = ib[idx]; ++ track->cb_color_bo[tmp] = reloc->robj; ++ break; ++ case CB_IMMED0_BASE: ++ case CB_IMMED1_BASE: 
++ case CB_IMMED2_BASE: ++ case CB_IMMED3_BASE: ++ case CB_IMMED4_BASE: ++ case CB_IMMED5_BASE: ++ case CB_IMMED6_BASE: ++ case CB_IMMED7_BASE: ++ case CB_IMMED8_BASE: ++ case CB_IMMED9_BASE: ++ case CB_IMMED10_BASE: ++ case CB_IMMED11_BASE: ++ case DB_HTILE_DATA_BASE: ++ case SQ_PGM_START_FS: ++ case SQ_PGM_START_ES: ++ case SQ_PGM_START_VS: ++ case SQ_PGM_START_GS: ++ case SQ_PGM_START_PS: ++ case SQ_PGM_START_HS: ++ case SQ_PGM_START_LS: ++ case GDS_ADDR_BASE: ++ case SQ_CONST_MEM_BASE: ++ case SQ_ALU_CONST_CACHE_GS_0: ++ case SQ_ALU_CONST_CACHE_GS_1: ++ case SQ_ALU_CONST_CACHE_GS_2: ++ case SQ_ALU_CONST_CACHE_GS_3: ++ case SQ_ALU_CONST_CACHE_GS_4: ++ case SQ_ALU_CONST_CACHE_GS_5: ++ case SQ_ALU_CONST_CACHE_GS_6: ++ case SQ_ALU_CONST_CACHE_GS_7: ++ case SQ_ALU_CONST_CACHE_GS_8: ++ case SQ_ALU_CONST_CACHE_GS_9: ++ case SQ_ALU_CONST_CACHE_GS_10: ++ case SQ_ALU_CONST_CACHE_GS_11: ++ case SQ_ALU_CONST_CACHE_GS_12: ++ case SQ_ALU_CONST_CACHE_GS_13: ++ case SQ_ALU_CONST_CACHE_GS_14: ++ case SQ_ALU_CONST_CACHE_GS_15: ++ case SQ_ALU_CONST_CACHE_PS_0: ++ case SQ_ALU_CONST_CACHE_PS_1: ++ case SQ_ALU_CONST_CACHE_PS_2: ++ case SQ_ALU_CONST_CACHE_PS_3: ++ case SQ_ALU_CONST_CACHE_PS_4: ++ case SQ_ALU_CONST_CACHE_PS_5: ++ case SQ_ALU_CONST_CACHE_PS_6: ++ case SQ_ALU_CONST_CACHE_PS_7: ++ case SQ_ALU_CONST_CACHE_PS_8: ++ case SQ_ALU_CONST_CACHE_PS_9: ++ case SQ_ALU_CONST_CACHE_PS_10: ++ case SQ_ALU_CONST_CACHE_PS_11: ++ case SQ_ALU_CONST_CACHE_PS_12: ++ case SQ_ALU_CONST_CACHE_PS_13: ++ case SQ_ALU_CONST_CACHE_PS_14: ++ case SQ_ALU_CONST_CACHE_PS_15: ++ case SQ_ALU_CONST_CACHE_VS_0: ++ case SQ_ALU_CONST_CACHE_VS_1: ++ case SQ_ALU_CONST_CACHE_VS_2: ++ case SQ_ALU_CONST_CACHE_VS_3: ++ case SQ_ALU_CONST_CACHE_VS_4: ++ case SQ_ALU_CONST_CACHE_VS_5: ++ case SQ_ALU_CONST_CACHE_VS_6: ++ case SQ_ALU_CONST_CACHE_VS_7: ++ case SQ_ALU_CONST_CACHE_VS_8: ++ case SQ_ALU_CONST_CACHE_VS_9: ++ case SQ_ALU_CONST_CACHE_VS_10: ++ case SQ_ALU_CONST_CACHE_VS_11: ++ case SQ_ALU_CONST_CACHE_VS_12: ++ 
case SQ_ALU_CONST_CACHE_VS_13: ++ case SQ_ALU_CONST_CACHE_VS_14: ++ case SQ_ALU_CONST_CACHE_VS_15: ++ case SQ_ALU_CONST_CACHE_HS_0: ++ case SQ_ALU_CONST_CACHE_HS_1: ++ case SQ_ALU_CONST_CACHE_HS_2: ++ case SQ_ALU_CONST_CACHE_HS_3: ++ case SQ_ALU_CONST_CACHE_HS_4: ++ case SQ_ALU_CONST_CACHE_HS_5: ++ case SQ_ALU_CONST_CACHE_HS_6: ++ case SQ_ALU_CONST_CACHE_HS_7: ++ case SQ_ALU_CONST_CACHE_HS_8: ++ case SQ_ALU_CONST_CACHE_HS_9: ++ case SQ_ALU_CONST_CACHE_HS_10: ++ case SQ_ALU_CONST_CACHE_HS_11: ++ case SQ_ALU_CONST_CACHE_HS_12: ++ case SQ_ALU_CONST_CACHE_HS_13: ++ case SQ_ALU_CONST_CACHE_HS_14: ++ case SQ_ALU_CONST_CACHE_HS_15: ++ case SQ_ALU_CONST_CACHE_LS_0: ++ case SQ_ALU_CONST_CACHE_LS_1: ++ case SQ_ALU_CONST_CACHE_LS_2: ++ case SQ_ALU_CONST_CACHE_LS_3: ++ case SQ_ALU_CONST_CACHE_LS_4: ++ case SQ_ALU_CONST_CACHE_LS_5: ++ case SQ_ALU_CONST_CACHE_LS_6: ++ case SQ_ALU_CONST_CACHE_LS_7: ++ case SQ_ALU_CONST_CACHE_LS_8: ++ case SQ_ALU_CONST_CACHE_LS_9: ++ case SQ_ALU_CONST_CACHE_LS_10: ++ case SQ_ALU_CONST_CACHE_LS_11: ++ case SQ_ALU_CONST_CACHE_LS_12: ++ case SQ_ALU_CONST_CACHE_LS_13: ++ case SQ_ALU_CONST_CACHE_LS_14: ++ case SQ_ALU_CONST_CACHE_LS_15: ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ dev_warn(p->dev, "bad SET_CONTEXT_REG " ++ "0x%04X\n", reg); ++ return -EINVAL; ++ } ++ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ break; ++ default: ++ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++/** ++ * evergreen_check_texture_resource() - check if register is authorized or not ++ * @p: parser structure holding parsing context ++ * @idx: index into the cs buffer ++ * @texture: texture's bo structure ++ * @mipmap: mipmap's bo structure ++ * ++ * This function will check that the resource has valid field and that ++ * the texture and mipmap bo object are big enough to cover this resource. 
++ */ ++static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx, ++ struct radeon_bo *texture, ++ struct radeon_bo *mipmap) ++{ ++ /* XXX fill in */ ++ return 0; ++} ++ ++static int evergreen_packet3_check(struct radeon_cs_parser *p, ++ struct radeon_cs_packet *pkt) ++{ ++ struct radeon_cs_reloc *reloc; ++ struct evergreen_cs_track *track; ++ volatile u32 *ib; ++ unsigned idx; ++ unsigned i; ++ unsigned start_reg, end_reg, reg; ++ int r; ++ u32 idx_value; ++ ++ track = (struct evergreen_cs_track *)p->track; ++ ib = p->ib->ptr; ++ idx = pkt->idx + 1; ++ idx_value = radeon_get_ib_value(p, idx); ++ ++ switch (pkt->opcode) { ++ case PACKET3_CONTEXT_CONTROL: ++ if (pkt->count != 1) { ++ DRM_ERROR("bad CONTEXT_CONTROL\n"); ++ return -EINVAL; ++ } ++ break; ++ case PACKET3_INDEX_TYPE: ++ case PACKET3_NUM_INSTANCES: ++ case PACKET3_CLEAR_STATE: ++ if (pkt->count) { ++ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); ++ return -EINVAL; ++ } ++ break; ++ case PACKET3_INDEX_BASE: ++ if (pkt->count != 1) { ++ DRM_ERROR("bad INDEX_BASE\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad INDEX_BASE\n"); ++ return -EINVAL; ++ } ++ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); ++ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; ++ r = evergreen_cs_track_check(p); ++ if (r) { ++ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); ++ return r; ++ } ++ break; ++ case PACKET3_DRAW_INDEX: ++ if (pkt->count != 3) { ++ DRM_ERROR("bad DRAW_INDEX\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad DRAW_INDEX\n"); ++ return -EINVAL; ++ } ++ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); ++ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; ++ r = evergreen_cs_track_check(p); ++ if (r) { ++ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); ++ 
return r; ++ } ++ break; ++ case PACKET3_DRAW_INDEX_2: ++ if (pkt->count != 4) { ++ DRM_ERROR("bad DRAW_INDEX_2\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad DRAW_INDEX_2\n"); ++ return -EINVAL; ++ } ++ ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); ++ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; ++ r = evergreen_cs_track_check(p); ++ if (r) { ++ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); ++ return r; ++ } ++ break; ++ case PACKET3_DRAW_INDEX_AUTO: ++ if (pkt->count != 1) { ++ DRM_ERROR("bad DRAW_INDEX_AUTO\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_track_check(p); ++ if (r) { ++ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); ++ return r; ++ } ++ break; ++ case PACKET3_DRAW_INDEX_MULTI_AUTO: ++ if (pkt->count != 2) { ++ DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_track_check(p); ++ if (r) { ++ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); ++ return r; ++ } ++ break; ++ case PACKET3_DRAW_INDEX_IMMD: ++ if (pkt->count < 2) { ++ DRM_ERROR("bad DRAW_INDEX_IMMD\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_track_check(p); ++ if (r) { ++ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); ++ return r; ++ } ++ break; ++ case PACKET3_DRAW_INDEX_OFFSET: ++ if (pkt->count != 2) { ++ DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_track_check(p); ++ if (r) { ++ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); ++ return r; ++ } ++ break; ++ case PACKET3_DRAW_INDEX_OFFSET_2: ++ if (pkt->count != 3) { ++ DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_track_check(p); ++ if (r) { ++ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); ++ return r; ++ } ++ break; ++ case PACKET3_WAIT_REG_MEM: ++ if (pkt->count != 5) { ++ 
DRM_ERROR("bad WAIT_REG_MEM\n"); ++ return -EINVAL; ++ } ++ /* bit 4 is reg (0) or mem (1) */ ++ if (idx_value & 0x10) { ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad WAIT_REG_MEM\n"); ++ return -EINVAL; ++ } ++ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); ++ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; ++ } ++ break; ++ case PACKET3_SURFACE_SYNC: ++ if (pkt->count != 3) { ++ DRM_ERROR("bad SURFACE_SYNC\n"); ++ return -EINVAL; ++ } ++ /* 0xffffffff/0x0 is flush all cache flag */ ++ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || ++ radeon_get_ib_value(p, idx + 2) != 0) { ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad SURFACE_SYNC\n"); ++ return -EINVAL; ++ } ++ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ } ++ break; ++ case PACKET3_EVENT_WRITE: ++ if (pkt->count != 2 && pkt->count != 0) { ++ DRM_ERROR("bad EVENT_WRITE\n"); ++ return -EINVAL; ++ } ++ if (pkt->count) { ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad EVENT_WRITE\n"); ++ return -EINVAL; ++ } ++ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); ++ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; ++ } ++ break; ++ case PACKET3_EVENT_WRITE_EOP: ++ if (pkt->count != 4) { ++ DRM_ERROR("bad EVENT_WRITE_EOP\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad EVENT_WRITE_EOP\n"); ++ return -EINVAL; ++ } ++ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); ++ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; ++ break; ++ case PACKET3_EVENT_WRITE_EOS: ++ if (pkt->count != 3) { ++ DRM_ERROR("bad EVENT_WRITE_EOS\n"); ++ return -EINVAL; ++ } ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad EVENT_WRITE_EOS\n"); ++ return -EINVAL; ++ } ++ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); ++ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) 
& 0xff; ++ break; ++ case PACKET3_SET_CONFIG_REG: ++ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; ++ end_reg = 4 * pkt->count + start_reg - 4; ++ if ((start_reg < PACKET3_SET_CONFIG_REG_START) || ++ (start_reg >= PACKET3_SET_CONFIG_REG_END) || ++ (end_reg >= PACKET3_SET_CONFIG_REG_END)) { ++ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); ++ return -EINVAL; ++ } ++ for (i = 0; i < pkt->count; i++) { ++ reg = start_reg + (4 * i); ++ r = evergreen_cs_check_reg(p, reg, idx+1+i); ++ if (r) ++ return r; ++ } ++ break; ++ case PACKET3_SET_CONTEXT_REG: ++ start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START; ++ end_reg = 4 * pkt->count + start_reg - 4; ++ if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || ++ (start_reg >= PACKET3_SET_CONTEXT_REG_END) || ++ (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { ++ DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); ++ return -EINVAL; ++ } ++ for (i = 0; i < pkt->count; i++) { ++ reg = start_reg + (4 * i); ++ r = evergreen_cs_check_reg(p, reg, idx+1+i); ++ if (r) ++ return r; ++ } ++ break; ++ case PACKET3_SET_RESOURCE: ++ if (pkt->count % 8) { ++ DRM_ERROR("bad SET_RESOURCE\n"); ++ return -EINVAL; ++ } ++ start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; ++ end_reg = 4 * pkt->count + start_reg - 4; ++ if ((start_reg < PACKET3_SET_RESOURCE_START) || ++ (start_reg >= PACKET3_SET_RESOURCE_END) || ++ (end_reg >= PACKET3_SET_RESOURCE_END)) { ++ DRM_ERROR("bad SET_RESOURCE\n"); ++ return -EINVAL; ++ } ++ for (i = 0; i < (pkt->count / 8); i++) { ++ struct radeon_bo *texture, *mipmap; ++ u32 size, offset; ++ ++ switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { ++ case SQ_TEX_VTX_VALID_TEXTURE: ++ /* tex base */ ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad SET_RESOURCE (tex)\n"); ++ return -EINVAL; ++ } ++ ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) ++ ib[idx+1+(i*8)+1] |= 
TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); ++ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) ++ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); ++ texture = reloc->robj; ++ /* tex mip base */ ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad SET_RESOURCE (tex)\n"); ++ return -EINVAL; ++ } ++ ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); ++ mipmap = reloc->robj; ++ r = evergreen_check_texture_resource(p, idx+1+(i*8), ++ texture, mipmap); ++ if (r) ++ return r; ++ break; ++ case SQ_TEX_VTX_VALID_BUFFER: ++ /* vtx base */ ++ r = evergreen_cs_packet_next_reloc(p, &reloc); ++ if (r) { ++ DRM_ERROR("bad SET_RESOURCE (vtx)\n"); ++ return -EINVAL; ++ } ++ offset = radeon_get_ib_value(p, idx+1+(i*8)+0); ++ size = radeon_get_ib_value(p, idx+1+(i*8)+1); ++ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { ++ /* force size to size of the buffer */ ++ dev_warn(p->dev, "vbo resource seems too big for the bo\n"); ++ ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj); ++ } ++ ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); ++ ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; ++ break; ++ case SQ_TEX_VTX_INVALID_TEXTURE: ++ case SQ_TEX_VTX_INVALID_BUFFER: ++ default: ++ DRM_ERROR("bad SET_RESOURCE\n"); ++ return -EINVAL; ++ } ++ } ++ break; ++ case PACKET3_SET_ALU_CONST: ++ /* XXX fix me ALU const buffers only */ ++ break; ++ case PACKET3_SET_BOOL_CONST: ++ start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START; ++ end_reg = 4 * pkt->count + start_reg - 4; ++ if ((start_reg < PACKET3_SET_BOOL_CONST_START) || ++ (start_reg >= PACKET3_SET_BOOL_CONST_END) || ++ (end_reg >= PACKET3_SET_BOOL_CONST_END)) { ++ DRM_ERROR("bad SET_BOOL_CONST\n"); ++ return -EINVAL; ++ } ++ break; ++ case PACKET3_SET_LOOP_CONST: ++ start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START; ++ end_reg = 4 * pkt->count + start_reg - 4; ++ if ((start_reg < PACKET3_SET_LOOP_CONST_START) 
|| ++ (start_reg >= PACKET3_SET_LOOP_CONST_END) || ++ (end_reg >= PACKET3_SET_LOOP_CONST_END)) { ++ DRM_ERROR("bad SET_LOOP_CONST\n"); ++ return -EINVAL; ++ } ++ break; ++ case PACKET3_SET_CTL_CONST: ++ start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START; ++ end_reg = 4 * pkt->count + start_reg - 4; ++ if ((start_reg < PACKET3_SET_CTL_CONST_START) || ++ (start_reg >= PACKET3_SET_CTL_CONST_END) || ++ (end_reg >= PACKET3_SET_CTL_CONST_END)) { ++ DRM_ERROR("bad SET_CTL_CONST\n"); ++ return -EINVAL; ++ } ++ break; ++ case PACKET3_SET_SAMPLER: ++ if (pkt->count % 3) { ++ DRM_ERROR("bad SET_SAMPLER\n"); ++ return -EINVAL; ++ } ++ start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START; ++ end_reg = 4 * pkt->count + start_reg - 4; ++ if ((start_reg < PACKET3_SET_SAMPLER_START) || ++ (start_reg >= PACKET3_SET_SAMPLER_END) || ++ (end_reg >= PACKET3_SET_SAMPLER_END)) { ++ DRM_ERROR("bad SET_SAMPLER\n"); ++ return -EINVAL; ++ } ++ break; ++ case PACKET3_NOP: ++ break; ++ default: ++ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++int evergreen_cs_parse(struct radeon_cs_parser *p) ++{ ++ struct radeon_cs_packet pkt; ++ struct evergreen_cs_track *track; ++ int r; ++ ++ if (p->track == NULL) { ++ /* initialize tracker, we are in kms */ ++ track = kzalloc(sizeof(*track), GFP_KERNEL); ++ if (track == NULL) ++ return -ENOMEM; ++ evergreen_cs_track_init(track); ++ track->npipes = p->rdev->config.evergreen.tiling_npipes; ++ track->nbanks = p->rdev->config.evergreen.tiling_nbanks; ++ track->group_size = p->rdev->config.evergreen.tiling_group_size; ++ p->track = track; ++ } ++ do { ++ r = evergreen_cs_packet_parse(p, &pkt, p->idx); ++ if (r) { ++ kfree(p->track); ++ p->track = NULL; ++ return r; ++ } ++ p->idx += pkt.count + 2; ++ switch (pkt.type) { ++ case PACKET_TYPE0: ++ r = evergreen_cs_parse_packet0(p, &pkt); ++ break; ++ case PACKET_TYPE2: ++ break; ++ case PACKET_TYPE3: ++ r = evergreen_packet3_check(p, &pkt); 
++ break; ++ default: ++ DRM_ERROR("Unknown packet type %d !\n", pkt.type); ++ kfree(p->track); ++ p->track = NULL; ++ return -EINVAL; ++ } ++ if (r) { ++ kfree(p->track); ++ p->track = NULL; ++ return r; ++ } ++ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); ++#if 0 ++ for (r = 0; r < p->ib->length_dw; r++) { ++ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]); ++ mdelay(1); ++ } ++#endif ++ kfree(p->track); ++ p->track = NULL; ++ return 0; ++} ++ +diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h +index f7c7c96..e028c1c 100644 +--- a/drivers/gpu/drm/radeon/evergreen_reg.h ++++ b/drivers/gpu/drm/radeon/evergreen_reg.h +@@ -151,6 +151,9 @@ + #define EVERGREEN_DATA_FORMAT 0x6b00 + # define EVERGREEN_INTERLEAVE_EN (1 << 0) + #define EVERGREEN_DESKTOP_HEIGHT 0x6b04 ++#define EVERGREEN_VLINE_START_END 0x6b08 ++#define EVERGREEN_VLINE_STATUS 0x6bb8 ++# define EVERGREEN_VLINE_STAT (1 << 12) + + #define EVERGREEN_VIEWPORT_START 0x6d70 + #define EVERGREEN_VIEWPORT_SIZE 0x6d74 +@@ -164,8 +167,12 @@ + #define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0) + + /* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ ++#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34 + #define EVERGREEN_CRTC_CONTROL 0x6e70 + # define EVERGREEN_CRTC_MASTER_EN (1 << 0) ++# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) ++#define EVERGREEN_CRTC_STATUS 0x6e8c ++#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 + #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 + + #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 +diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h +new file mode 100644 +index 0000000..79683f6 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/evergreend.h +@@ -0,0 +1,1020 @@ ++/* ++ * Copyright 2010 Advanced Micro Devices, Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Alex Deucher ++ */ ++#ifndef EVERGREEND_H ++#define EVERGREEND_H ++ ++#define EVERGREEN_MAX_SH_GPRS 256 ++#define EVERGREEN_MAX_TEMP_GPRS 16 ++#define EVERGREEN_MAX_SH_THREADS 256 ++#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096 ++#define EVERGREEN_MAX_FRC_EOV_CNT 16384 ++#define EVERGREEN_MAX_BACKENDS 8 ++#define EVERGREEN_MAX_BACKENDS_MASK 0xFF ++#define EVERGREEN_MAX_SIMDS 16 ++#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF ++#define EVERGREEN_MAX_PIPES 8 ++#define EVERGREEN_MAX_PIPES_MASK 0xFF ++#define EVERGREEN_MAX_LDS_NUM 0xFFFF ++ ++/* Registers */ ++ ++#define RCU_IND_INDEX 0x100 ++#define RCU_IND_DATA 0x104 ++ ++#define GRBM_GFX_INDEX 0x802C ++#define INSTANCE_INDEX(x) ((x) << 0) ++#define SE_INDEX(x) ((x) << 16) ++#define INSTANCE_BROADCAST_WRITES (1 << 30) ++#define SE_BROADCAST_WRITES (1 << 31) ++#define RLC_GFX_INDEX 0x3fC4 ++#define CC_GC_SHADER_PIPE_CONFIG 0x8950 ++#define WRITE_DIS (1 << 0) ++#define CC_RB_BACKEND_DISABLE 0x98F4 ++#define BACKEND_DISABLE(x) ((x) << 16) ++#define GB_ADDR_CONFIG 0x98F8 ++#define NUM_PIPES(x) ((x) << 0) ++#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) ++#define BANK_INTERLEAVE_SIZE(x) ((x) << 8) ++#define NUM_SHADER_ENGINES(x) ((x) << 12) ++#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) ++#define NUM_GPUS(x) ((x) << 20) ++#define MULTI_GPU_TILE_SIZE(x) ((x) << 24) ++#define ROW_SIZE(x) ((x) << 28) ++#define GB_BACKEND_MAP 0x98FC ++#define DMIF_ADDR_CONFIG 0xBD4 ++#define HDP_ADDR_CONFIG 0x2F48 ++ ++#define CC_SYS_RB_BACKEND_DISABLE 0x3F88 ++#define GC_USER_RB_BACKEND_DISABLE 0x9B7C ++ ++#define CGTS_SYS_TCC_DISABLE 0x3F90 ++#define CGTS_TCC_DISABLE 0x9148 ++#define CGTS_USER_SYS_TCC_DISABLE 0x3F94 ++#define CGTS_USER_TCC_DISABLE 0x914C ++ ++#define CONFIG_MEMSIZE 0x5428 ++ ++#define CP_ME_CNTL 0x86D8 ++#define CP_ME_HALT (1 << 28) ++#define CP_PFP_HALT (1 << 26) ++#define CP_ME_RAM_DATA 0xC160 ++#define CP_ME_RAM_RADDR 0xC158 ++#define CP_ME_RAM_WADDR 0xC15C ++#define CP_MEQ_THRESHOLDS 0x8764 ++#define 
STQ_SPLIT(x) ((x) << 0) ++#define CP_PERFMON_CNTL 0x87FC ++#define CP_PFP_UCODE_ADDR 0xC150 ++#define CP_PFP_UCODE_DATA 0xC154 ++#define CP_QUEUE_THRESHOLDS 0x8760 ++#define ROQ_IB1_START(x) ((x) << 0) ++#define ROQ_IB2_START(x) ((x) << 8) ++#define CP_RB_BASE 0xC100 ++#define CP_RB_CNTL 0xC104 ++#define RB_BUFSZ(x) ((x) << 0) ++#define RB_BLKSZ(x) ((x) << 8) ++#define RB_NO_UPDATE (1 << 27) ++#define RB_RPTR_WR_ENA (1 << 31) ++#define BUF_SWAP_32BIT (2 << 16) ++#define CP_RB_RPTR 0x8700 ++#define CP_RB_RPTR_ADDR 0xC10C ++#define CP_RB_RPTR_ADDR_HI 0xC110 ++#define CP_RB_RPTR_WR 0xC108 ++#define CP_RB_WPTR 0xC114 ++#define CP_RB_WPTR_ADDR 0xC118 ++#define CP_RB_WPTR_ADDR_HI 0xC11C ++#define CP_RB_WPTR_DELAY 0x8704 ++#define CP_SEM_WAIT_TIMER 0x85BC ++#define CP_DEBUG 0xC1FC ++ ++ ++#define GC_USER_SHADER_PIPE_CONFIG 0x8954 ++#define INACTIVE_QD_PIPES(x) ((x) << 8) ++#define INACTIVE_QD_PIPES_MASK 0x0000FF00 ++#define INACTIVE_SIMDS(x) ((x) << 16) ++#define INACTIVE_SIMDS_MASK 0x00FF0000 ++ ++#define GRBM_CNTL 0x8000 ++#define GRBM_READ_TIMEOUT(x) ((x) << 0) ++#define GRBM_SOFT_RESET 0x8020 ++#define SOFT_RESET_CP (1 << 0) ++#define SOFT_RESET_CB (1 << 1) ++#define SOFT_RESET_DB (1 << 3) ++#define SOFT_RESET_PA (1 << 5) ++#define SOFT_RESET_SC (1 << 6) ++#define SOFT_RESET_SPI (1 << 8) ++#define SOFT_RESET_SH (1 << 9) ++#define SOFT_RESET_SX (1 << 10) ++#define SOFT_RESET_TC (1 << 11) ++#define SOFT_RESET_TA (1 << 12) ++#define SOFT_RESET_VC (1 << 13) ++#define SOFT_RESET_VGT (1 << 14) ++ ++#define GRBM_STATUS 0x8010 ++#define CMDFIFO_AVAIL_MASK 0x0000000F ++#define SRBM_RQ_PENDING (1 << 5) ++#define CF_RQ_PENDING (1 << 7) ++#define PF_RQ_PENDING (1 << 8) ++#define GRBM_EE_BUSY (1 << 10) ++#define SX_CLEAN (1 << 11) ++#define DB_CLEAN (1 << 12) ++#define CB_CLEAN (1 << 13) ++#define TA_BUSY (1 << 14) ++#define VGT_BUSY_NO_DMA (1 << 16) ++#define VGT_BUSY (1 << 17) ++#define SX_BUSY (1 << 20) ++#define SH_BUSY (1 << 21) ++#define SPI_BUSY (1 << 22) ++#define SC_BUSY 
(1 << 24) ++#define PA_BUSY (1 << 25) ++#define DB_BUSY (1 << 26) ++#define CP_COHERENCY_BUSY (1 << 28) ++#define CP_BUSY (1 << 29) ++#define CB_BUSY (1 << 30) ++#define GUI_ACTIVE (1 << 31) ++#define GRBM_STATUS_SE0 0x8014 ++#define GRBM_STATUS_SE1 0x8018 ++#define SE_SX_CLEAN (1 << 0) ++#define SE_DB_CLEAN (1 << 1) ++#define SE_CB_CLEAN (1 << 2) ++#define SE_TA_BUSY (1 << 25) ++#define SE_SX_BUSY (1 << 26) ++#define SE_SPI_BUSY (1 << 27) ++#define SE_SH_BUSY (1 << 28) ++#define SE_SC_BUSY (1 << 29) ++#define SE_DB_BUSY (1 << 30) ++#define SE_CB_BUSY (1 << 31) ++ ++#define HDP_HOST_PATH_CNTL 0x2C00 ++#define HDP_NONSURFACE_BASE 0x2C04 ++#define HDP_NONSURFACE_INFO 0x2C08 ++#define HDP_NONSURFACE_SIZE 0x2C0C ++#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 ++#define HDP_TILING_CONFIG 0x2F3C ++ ++#define MC_SHARED_CHMAP 0x2004 ++#define NOOFCHAN_SHIFT 12 ++#define NOOFCHAN_MASK 0x00003000 ++ ++#define MC_ARB_RAMCFG 0x2760 ++#define NOOFBANK_SHIFT 0 ++#define NOOFBANK_MASK 0x00000003 ++#define NOOFRANK_SHIFT 2 ++#define NOOFRANK_MASK 0x00000004 ++#define NOOFROWS_SHIFT 3 ++#define NOOFROWS_MASK 0x00000038 ++#define NOOFCOLS_SHIFT 6 ++#define NOOFCOLS_MASK 0x000000C0 ++#define CHANSIZE_SHIFT 8 ++#define CHANSIZE_MASK 0x00000100 ++#define BURSTLENGTH_SHIFT 9 ++#define BURSTLENGTH_MASK 0x00000200 ++#define CHANSIZE_OVERRIDE (1 << 11) ++#define MC_VM_AGP_TOP 0x2028 ++#define MC_VM_AGP_BOT 0x202C ++#define MC_VM_AGP_BASE 0x2030 ++#define MC_VM_FB_LOCATION 0x2024 ++#define MC_VM_MB_L1_TLB0_CNTL 0x2234 ++#define MC_VM_MB_L1_TLB1_CNTL 0x2238 ++#define MC_VM_MB_L1_TLB2_CNTL 0x223C ++#define MC_VM_MB_L1_TLB3_CNTL 0x2240 ++#define ENABLE_L1_TLB (1 << 0) ++#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) ++#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3) ++#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3) ++#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) ++#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3) ++#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) ++#define 
EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15) ++#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18) ++#define MC_VM_MD_L1_TLB0_CNTL 0x2654 ++#define MC_VM_MD_L1_TLB1_CNTL 0x2658 ++#define MC_VM_MD_L1_TLB2_CNTL 0x265C ++#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C ++#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 ++#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 ++ ++#define PA_CL_ENHANCE 0x8A14 ++#define CLIP_VTX_REORDER_ENA (1 << 0) ++#define NUM_CLIP_SEQ(x) ((x) << 1) ++#define PA_SC_AA_CONFIG 0x28C04 ++#define MSAA_NUM_SAMPLES_SHIFT 0 ++#define MSAA_NUM_SAMPLES_MASK 0x3 ++#define PA_SC_CLIPRECT_RULE 0x2820C ++#define PA_SC_EDGERULE 0x28230 ++#define PA_SC_FIFO_SIZE 0x8BCC ++#define SC_PRIM_FIFO_SIZE(x) ((x) << 0) ++#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) ++#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) ++#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 ++#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) ++#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) ++#define PA_SC_LINE_STIPPLE 0x28A0C ++#define PA_SC_LINE_STIPPLE_STATE 0x8B10 ++ ++#define SCRATCH_REG0 0x8500 ++#define SCRATCH_REG1 0x8504 ++#define SCRATCH_REG2 0x8508 ++#define SCRATCH_REG3 0x850C ++#define SCRATCH_REG4 0x8510 ++#define SCRATCH_REG5 0x8514 ++#define SCRATCH_REG6 0x8518 ++#define SCRATCH_REG7 0x851C ++#define SCRATCH_UMSK 0x8540 ++#define SCRATCH_ADDR 0x8544 ++ ++#define SMX_DC_CTL0 0xA020 ++#define USE_HASH_FUNCTION (1 << 0) ++#define NUMBER_OF_SETS(x) ((x) << 1) ++#define FLUSH_ALL_ON_EVENT (1 << 10) ++#define STALL_ON_EVENT (1 << 11) ++#define SMX_EVENT_CTL 0xA02C ++#define ES_FLUSH_CTL(x) ((x) << 0) ++#define GS_FLUSH_CTL(x) ((x) << 3) ++#define ACK_FLUSH_CTL(x) ((x) << 6) ++#define SYNC_FLUSH_CTL (1 << 8) ++ ++#define SPI_CONFIG_CNTL 0x9100 ++#define GPR_WRITE_PRIORITY(x) ((x) << 0) ++#define SPI_CONFIG_CNTL_1 0x913C ++#define VTX_DONE_DELAY(x) ((x) << 0) ++#define INTERP_ONE_PRIM_PER_ROW (1 << 4) ++#define SPI_INPUT_Z 0x286D8 ++#define SPI_PS_IN_CONTROL_0 0x286CC ++#define NUM_INTERP(x) ((x)<<0) ++#define 
POSITION_ENA (1<<8) ++#define POSITION_CENTROID (1<<9) ++#define POSITION_ADDR(x) ((x)<<10) ++#define PARAM_GEN(x) ((x)<<15) ++#define PARAM_GEN_ADDR(x) ((x)<<19) ++#define BARYC_SAMPLE_CNTL(x) ((x)<<26) ++#define PERSP_GRADIENT_ENA (1<<28) ++#define LINEAR_GRADIENT_ENA (1<<29) ++#define POSITION_SAMPLE (1<<30) ++#define BARYC_AT_SAMPLE_ENA (1<<31) ++ ++#define SQ_CONFIG 0x8C00 ++#define VC_ENABLE (1 << 0) ++#define EXPORT_SRC_C (1 << 1) ++#define CS_PRIO(x) ((x) << 18) ++#define LS_PRIO(x) ((x) << 20) ++#define HS_PRIO(x) ((x) << 22) ++#define PS_PRIO(x) ((x) << 24) ++#define VS_PRIO(x) ((x) << 26) ++#define GS_PRIO(x) ((x) << 28) ++#define ES_PRIO(x) ((x) << 30) ++#define SQ_GPR_RESOURCE_MGMT_1 0x8C04 ++#define NUM_PS_GPRS(x) ((x) << 0) ++#define NUM_VS_GPRS(x) ((x) << 16) ++#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) ++#define SQ_GPR_RESOURCE_MGMT_2 0x8C08 ++#define NUM_GS_GPRS(x) ((x) << 0) ++#define NUM_ES_GPRS(x) ((x) << 16) ++#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C ++#define NUM_HS_GPRS(x) ((x) << 0) ++#define NUM_LS_GPRS(x) ((x) << 16) ++#define SQ_THREAD_RESOURCE_MGMT 0x8C18 ++#define NUM_PS_THREADS(x) ((x) << 0) ++#define NUM_VS_THREADS(x) ((x) << 8) ++#define NUM_GS_THREADS(x) ((x) << 16) ++#define NUM_ES_THREADS(x) ((x) << 24) ++#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C ++#define NUM_HS_THREADS(x) ((x) << 0) ++#define NUM_LS_THREADS(x) ((x) << 8) ++#define SQ_STACK_RESOURCE_MGMT_1 0x8C20 ++#define NUM_PS_STACK_ENTRIES(x) ((x) << 0) ++#define NUM_VS_STACK_ENTRIES(x) ((x) << 16) ++#define SQ_STACK_RESOURCE_MGMT_2 0x8C24 ++#define NUM_GS_STACK_ENTRIES(x) ((x) << 0) ++#define NUM_ES_STACK_ENTRIES(x) ((x) << 16) ++#define SQ_STACK_RESOURCE_MGMT_3 0x8C28 ++#define NUM_HS_STACK_ENTRIES(x) ((x) << 0) ++#define NUM_LS_STACK_ENTRIES(x) ((x) << 16) ++#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C ++#define SQ_LDS_RESOURCE_MGMT 0x8E2C ++ ++#define SQ_MS_FIFO_SIZES 0x8CF0 ++#define CACHE_FIFO_SIZE(x) ((x) << 0) ++#define FETCH_FIFO_HIWATER(x) ((x) << 8) ++#define 
DONE_FIFO_HIWATER(x) ((x) << 16) ++#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) ++ ++#define SX_DEBUG_1 0x9058 ++#define ENABLE_NEW_SMX_ADDRESS (1 << 16) ++#define SX_EXPORT_BUFFER_SIZES 0x900C ++#define COLOR_BUFFER_SIZE(x) ((x) << 0) ++#define POSITION_BUFFER_SIZE(x) ((x) << 8) ++#define SMX_BUFFER_SIZE(x) ((x) << 16) ++#define SX_MISC 0x28350 ++ ++#define CB_PERF_CTR0_SEL_0 0x9A20 ++#define CB_PERF_CTR0_SEL_1 0x9A24 ++#define CB_PERF_CTR1_SEL_0 0x9A28 ++#define CB_PERF_CTR1_SEL_1 0x9A2C ++#define CB_PERF_CTR2_SEL_0 0x9A30 ++#define CB_PERF_CTR2_SEL_1 0x9A34 ++#define CB_PERF_CTR3_SEL_0 0x9A38 ++#define CB_PERF_CTR3_SEL_1 0x9A3C ++ ++#define TA_CNTL_AUX 0x9508 ++#define DISABLE_CUBE_WRAP (1 << 0) ++#define DISABLE_CUBE_ANISO (1 << 1) ++#define SYNC_GRADIENT (1 << 24) ++#define SYNC_WALKER (1 << 25) ++#define SYNC_ALIGNER (1 << 26) ++ ++#define VGT_CACHE_INVALIDATION 0x88C4 ++#define CACHE_INVALIDATION(x) ((x) << 0) ++#define VC_ONLY 0 ++#define TC_ONLY 1 ++#define VC_AND_TC 2 ++#define AUTO_INVLD_EN(x) ((x) << 6) ++#define NO_AUTO 0 ++#define ES_AUTO 1 ++#define GS_AUTO 2 ++#define ES_AND_GS_AUTO 3 ++#define VGT_GS_VERTEX_REUSE 0x88D4 ++#define VGT_NUM_INSTANCES 0x8974 ++#define VGT_OUT_DEALLOC_CNTL 0x28C5C ++#define DEALLOC_DIST_MASK 0x0000007F ++#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 ++#define VTX_REUSE_DEPTH_MASK 0x000000FF ++ ++#define VM_CONTEXT0_CNTL 0x1410 ++#define ENABLE_CONTEXT (1 << 0) ++#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) ++#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) ++#define VM_CONTEXT1_CNTL 0x1414 ++#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C ++#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C ++#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C ++#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 ++#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 ++#define REQUEST_TYPE(x) (((x) & 0xf) << 0) ++#define RESPONSE_TYPE_MASK 0x000000F0 ++#define RESPONSE_TYPE_SHIFT 4 ++#define VM_L2_CNTL 0x1400 ++#define ENABLE_L2_CACHE (1 << 
0) ++#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) ++#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) ++#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14) ++#define VM_L2_CNTL2 0x1404 ++#define INVALIDATE_ALL_L1_TLBS (1 << 0) ++#define INVALIDATE_L2_CACHE (1 << 1) ++#define VM_L2_CNTL3 0x1408 ++#define BANK_SELECT(x) ((x) << 0) ++#define CACHE_UPDATE_MODE(x) ((x) << 6) ++#define VM_L2_STATUS 0x140C ++#define L2_BUSY (1 << 0) ++ ++#define WAIT_UNTIL 0x8040 ++ ++#define SRBM_STATUS 0x0E50 ++#define SRBM_SOFT_RESET 0x0E60 ++#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6 ++#define SOFT_RESET_BIF (1 << 1) ++#define SOFT_RESET_CG (1 << 2) ++#define SOFT_RESET_DC (1 << 5) ++#define SOFT_RESET_GRBM (1 << 8) ++#define SOFT_RESET_HDP (1 << 9) ++#define SOFT_RESET_IH (1 << 10) ++#define SOFT_RESET_MC (1 << 11) ++#define SOFT_RESET_RLC (1 << 13) ++#define SOFT_RESET_ROM (1 << 14) ++#define SOFT_RESET_SEM (1 << 15) ++#define SOFT_RESET_VMC (1 << 17) ++#define SOFT_RESET_TST (1 << 21) ++#define SOFT_RESET_REGBB (1 << 22) ++#define SOFT_RESET_ORB (1 << 23) ++ ++#define IH_RB_CNTL 0x3e00 ++# define IH_RB_ENABLE (1 << 0) ++# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ ++# define IH_RB_FULL_DRAIN_ENABLE (1 << 6) ++# define IH_WPTR_WRITEBACK_ENABLE (1 << 8) ++# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ ++# define IH_WPTR_OVERFLOW_ENABLE (1 << 16) ++# define IH_WPTR_OVERFLOW_CLEAR (1 << 31) ++#define IH_RB_BASE 0x3e04 ++#define IH_RB_RPTR 0x3e08 ++#define IH_RB_WPTR 0x3e0c ++# define RB_OVERFLOW (1 << 0) ++# define WPTR_OFFSET_MASK 0x3fffc ++#define IH_RB_WPTR_ADDR_HI 0x3e10 ++#define IH_RB_WPTR_ADDR_LO 0x3e14 ++#define IH_CNTL 0x3e18 ++# define ENABLE_INTR (1 << 0) ++# define IH_MC_SWAP(x) ((x) << 2) ++# define IH_MC_SWAP_NONE 0 ++# define IH_MC_SWAP_16BIT 1 ++# define IH_MC_SWAP_32BIT 2 ++# define IH_MC_SWAP_64BIT 3 ++# define RPTR_REARM (1 << 4) ++# define MC_WRREQ_CREDIT(x) ((x) << 15) ++# define MC_WR_CLEAN_CNT(x) ((x) << 20) ++ ++#define CP_INT_CNTL 0xc124 
++# define CNTX_BUSY_INT_ENABLE (1 << 19) ++# define CNTX_EMPTY_INT_ENABLE (1 << 20) ++# define SCRATCH_INT_ENABLE (1 << 25) ++# define TIME_STAMP_INT_ENABLE (1 << 26) ++# define IB2_INT_ENABLE (1 << 29) ++# define IB1_INT_ENABLE (1 << 30) ++# define RB_INT_ENABLE (1 << 31) ++#define CP_INT_STATUS 0xc128 ++# define SCRATCH_INT_STAT (1 << 25) ++# define TIME_STAMP_INT_STAT (1 << 26) ++# define IB2_INT_STAT (1 << 29) ++# define IB1_INT_STAT (1 << 30) ++# define RB_INT_STAT (1 << 31) ++ ++#define GRBM_INT_CNTL 0x8060 ++# define RDERR_INT_ENABLE (1 << 0) ++# define GUI_IDLE_INT_ENABLE (1 << 19) ++ ++/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ ++#define CRTC_STATUS_FRAME_COUNT 0x6e98 ++ ++/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */ ++#define VLINE_STATUS 0x6bb8 ++# define VLINE_OCCURRED (1 << 0) ++# define VLINE_ACK (1 << 4) ++# define VLINE_STAT (1 << 12) ++# define VLINE_INTERRUPT (1 << 16) ++# define VLINE_INTERRUPT_TYPE (1 << 17) ++/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */ ++#define VBLANK_STATUS 0x6bbc ++# define VBLANK_OCCURRED (1 << 0) ++# define VBLANK_ACK (1 << 4) ++# define VBLANK_STAT (1 << 12) ++# define VBLANK_INTERRUPT (1 << 16) ++# define VBLANK_INTERRUPT_TYPE (1 << 17) ++ ++/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */ ++#define INT_MASK 0x6b40 ++# define VBLANK_INT_MASK (1 << 0) ++# define VLINE_INT_MASK (1 << 4) ++ ++#define DISP_INTERRUPT_STATUS 0x60f4 ++# define LB_D1_VLINE_INTERRUPT (1 << 2) ++# define LB_D1_VBLANK_INTERRUPT (1 << 3) ++# define DC_HPD1_INTERRUPT (1 << 17) ++# define DC_HPD1_RX_INTERRUPT (1 << 18) ++# define DACA_AUTODETECT_INTERRUPT (1 << 22) ++# define DACB_AUTODETECT_INTERRUPT (1 << 23) ++# define DC_I2C_SW_DONE_INTERRUPT (1 << 24) ++# define DC_I2C_HW_DONE_INTERRUPT (1 << 25) ++#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8 ++# define LB_D2_VLINE_INTERRUPT (1 << 2) ++# define LB_D2_VBLANK_INTERRUPT (1 << 3) ++# define DC_HPD2_INTERRUPT (1 << 17) ++# define DC_HPD2_RX_INTERRUPT (1 
<< 18) ++# define DISP_TIMER_INTERRUPT (1 << 24) ++#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc ++# define LB_D3_VLINE_INTERRUPT (1 << 2) ++# define LB_D3_VBLANK_INTERRUPT (1 << 3) ++# define DC_HPD3_INTERRUPT (1 << 17) ++# define DC_HPD3_RX_INTERRUPT (1 << 18) ++#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100 ++# define LB_D4_VLINE_INTERRUPT (1 << 2) ++# define LB_D4_VBLANK_INTERRUPT (1 << 3) ++# define DC_HPD4_INTERRUPT (1 << 17) ++# define DC_HPD4_RX_INTERRUPT (1 << 18) ++#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c ++# define LB_D5_VLINE_INTERRUPT (1 << 2) ++# define LB_D5_VBLANK_INTERRUPT (1 << 3) ++# define DC_HPD5_INTERRUPT (1 << 17) ++# define DC_HPD5_RX_INTERRUPT (1 << 18) ++#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050 ++# define LB_D6_VLINE_INTERRUPT (1 << 2) ++# define LB_D6_VBLANK_INTERRUPT (1 << 3) ++# define DC_HPD6_INTERRUPT (1 << 17) ++# define DC_HPD6_RX_INTERRUPT (1 << 18) ++ ++/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ ++#define GRPH_INT_STATUS 0x6858 ++# define GRPH_PFLIP_INT_OCCURRED (1 << 0) ++# define GRPH_PFLIP_INT_CLEAR (1 << 8) ++/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ ++#define GRPH_INT_CONTROL 0x685c ++# define GRPH_PFLIP_INT_MASK (1 << 0) ++# define GRPH_PFLIP_INT_TYPE (1 << 8) ++ ++#define DACA_AUTODETECT_INT_CONTROL 0x66c8 ++#define DACB_AUTODETECT_INT_CONTROL 0x67c8 ++ ++#define DC_HPD1_INT_STATUS 0x601c ++#define DC_HPD2_INT_STATUS 0x6028 ++#define DC_HPD3_INT_STATUS 0x6034 ++#define DC_HPD4_INT_STATUS 0x6040 ++#define DC_HPD5_INT_STATUS 0x604c ++#define DC_HPD6_INT_STATUS 0x6058 ++# define DC_HPDx_INT_STATUS (1 << 0) ++# define DC_HPDx_SENSE (1 << 1) ++# define DC_HPDx_RX_INT_STATUS (1 << 8) ++ ++#define DC_HPD1_INT_CONTROL 0x6020 ++#define DC_HPD2_INT_CONTROL 0x602c ++#define DC_HPD3_INT_CONTROL 0x6038 ++#define DC_HPD4_INT_CONTROL 0x6044 ++#define DC_HPD5_INT_CONTROL 0x6050 ++#define DC_HPD6_INT_CONTROL 0x605c ++# define DC_HPDx_INT_ACK (1 << 0) ++# define DC_HPDx_INT_POLARITY (1 << 8) ++# 
define DC_HPDx_INT_EN (1 << 16) ++# define DC_HPDx_RX_INT_ACK (1 << 20) ++# define DC_HPDx_RX_INT_EN (1 << 24) ++ ++#define DC_HPD1_CONTROL 0x6024 ++#define DC_HPD2_CONTROL 0x6030 ++#define DC_HPD3_CONTROL 0x603c ++#define DC_HPD4_CONTROL 0x6048 ++#define DC_HPD5_CONTROL 0x6054 ++#define DC_HPD6_CONTROL 0x6060 ++# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0) ++# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) ++# define DC_HPDx_EN (1 << 28) ++ ++/* ++ * PM4 ++ */ ++#define PACKET_TYPE0 0 ++#define PACKET_TYPE1 1 ++#define PACKET_TYPE2 2 ++#define PACKET_TYPE3 3 ++ ++#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) ++#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) ++#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) ++#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) ++#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ ++ (((reg) >> 2) & 0xFFFF) | \ ++ ((n) & 0x3FFF) << 16) ++#define CP_PACKET2 0x80000000 ++#define PACKET2_PAD_SHIFT 0 ++#define PACKET2_PAD_MASK (0x3fffffff << 0) ++ ++#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) ++ ++#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ ++ (((op) & 0xFF) << 8) | \ ++ ((n) & 0x3FFF) << 16) ++ ++/* Packet 3 types */ ++#define PACKET3_NOP 0x10 ++#define PACKET3_SET_BASE 0x11 ++#define PACKET3_CLEAR_STATE 0x12 ++#define PACKET3_INDIRECT_BUFFER_SIZE 0x13 ++#define PACKET3_DISPATCH_DIRECT 0x15 ++#define PACKET3_DISPATCH_INDIRECT 0x16 ++#define PACKET3_INDIRECT_BUFFER_END 0x17 ++#define PACKET3_SET_PREDICATION 0x20 ++#define PACKET3_REG_RMW 0x21 ++#define PACKET3_COND_EXEC 0x22 ++#define PACKET3_PRED_EXEC 0x23 ++#define PACKET3_DRAW_INDIRECT 0x24 ++#define PACKET3_DRAW_INDEX_INDIRECT 0x25 ++#define PACKET3_INDEX_BASE 0x26 ++#define PACKET3_DRAW_INDEX_2 0x27 ++#define PACKET3_CONTEXT_CONTROL 0x28 ++#define PACKET3_DRAW_INDEX_OFFSET 0x29 ++#define PACKET3_INDEX_TYPE 0x2A ++#define PACKET3_DRAW_INDEX 0x2B ++#define PACKET3_DRAW_INDEX_AUTO 0x2D ++#define PACKET3_DRAW_INDEX_IMMD 0x2E ++#define PACKET3_NUM_INSTANCES 
0x2F ++#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 ++#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 ++#define PACKET3_DRAW_INDEX_OFFSET_2 0x35 ++#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 ++#define PACKET3_MEM_SEMAPHORE 0x39 ++#define PACKET3_MPEG_INDEX 0x3A ++#define PACKET3_WAIT_REG_MEM 0x3C ++#define PACKET3_MEM_WRITE 0x3D ++#define PACKET3_INDIRECT_BUFFER 0x32 ++#define PACKET3_SURFACE_SYNC 0x43 ++# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) ++# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) ++# define PACKET3_CB2_DEST_BASE_ENA (1 << 8) ++# define PACKET3_CB3_DEST_BASE_ENA (1 << 9) ++# define PACKET3_CB4_DEST_BASE_ENA (1 << 10) ++# define PACKET3_CB5_DEST_BASE_ENA (1 << 11) ++# define PACKET3_CB6_DEST_BASE_ENA (1 << 12) ++# define PACKET3_CB7_DEST_BASE_ENA (1 << 13) ++# define PACKET3_DB_DEST_BASE_ENA (1 << 14) ++# define PACKET3_CB8_DEST_BASE_ENA (1 << 15) ++# define PACKET3_CB9_DEST_BASE_ENA (1 << 16) ++# define PACKET3_CB10_DEST_BASE_ENA (1 << 17) ++# define PACKET3_CB11_DEST_BASE_ENA (1 << 17) ++# define PACKET3_FULL_CACHE_ENA (1 << 20) ++# define PACKET3_TC_ACTION_ENA (1 << 23) ++# define PACKET3_VC_ACTION_ENA (1 << 24) ++# define PACKET3_CB_ACTION_ENA (1 << 25) ++# define PACKET3_DB_ACTION_ENA (1 << 26) ++# define PACKET3_SH_ACTION_ENA (1 << 27) ++# define PACKET3_SMX_ACTION_ENA (1 << 28) ++#define PACKET3_ME_INITIALIZE 0x44 ++#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) ++#define PACKET3_COND_WRITE 0x45 ++#define PACKET3_EVENT_WRITE 0x46 ++#define PACKET3_EVENT_WRITE_EOP 0x47 ++#define PACKET3_EVENT_WRITE_EOS 0x48 ++#define PACKET3_PREAMBLE_CNTL 0x4A ++#define PACKET3_RB_OFFSET 0x4B ++#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C ++#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D ++#define PACKET3_ALU_PS_CONST_UPDATE 0x4E ++#define PACKET3_ALU_VS_CONST_UPDATE 0x4F ++#define PACKET3_ONE_REG_WRITE 0x57 ++#define PACKET3_SET_CONFIG_REG 0x68 ++#define PACKET3_SET_CONFIG_REG_START 0x00008000 ++#define PACKET3_SET_CONFIG_REG_END 0x0000ac00 ++#define 
PACKET3_SET_CONTEXT_REG 0x69 ++#define PACKET3_SET_CONTEXT_REG_START 0x00028000 ++#define PACKET3_SET_CONTEXT_REG_END 0x00029000 ++#define PACKET3_SET_ALU_CONST 0x6A ++/* alu const buffers only; no reg file */ ++#define PACKET3_SET_BOOL_CONST 0x6B ++#define PACKET3_SET_BOOL_CONST_START 0x0003a500 ++#define PACKET3_SET_BOOL_CONST_END 0x0003a518 ++#define PACKET3_SET_LOOP_CONST 0x6C ++#define PACKET3_SET_LOOP_CONST_START 0x0003a200 ++#define PACKET3_SET_LOOP_CONST_END 0x0003a500 ++#define PACKET3_SET_RESOURCE 0x6D ++#define PACKET3_SET_RESOURCE_START 0x00030000 ++#define PACKET3_SET_RESOURCE_END 0x00038000 ++#define PACKET3_SET_SAMPLER 0x6E ++#define PACKET3_SET_SAMPLER_START 0x0003c000 ++#define PACKET3_SET_SAMPLER_END 0x0003c600 ++#define PACKET3_SET_CTL_CONST 0x6F ++#define PACKET3_SET_CTL_CONST_START 0x0003cff0 ++#define PACKET3_SET_CTL_CONST_END 0x0003ff0c ++#define PACKET3_SET_RESOURCE_OFFSET 0x70 ++#define PACKET3_SET_ALU_CONST_VS 0x71 ++#define PACKET3_SET_ALU_CONST_DI 0x72 ++#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 ++#define PACKET3_SET_RESOURCE_INDIRECT 0x74 ++#define PACKET3_SET_APPEND_CNT 0x75 ++ ++#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c ++#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30) ++#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3) ++#define SQ_TEX_VTX_INVALID_TEXTURE 0x0 ++#define SQ_TEX_VTX_INVALID_BUFFER 0x1 ++#define SQ_TEX_VTX_VALID_TEXTURE 0x2 ++#define SQ_TEX_VTX_VALID_BUFFER 0x3 ++ ++#define SQ_CONST_MEM_BASE 0x8df8 ++ ++#define SQ_ESGS_RING_SIZE 0x8c44 ++#define SQ_GSVS_RING_SIZE 0x8c4c ++#define SQ_ESTMP_RING_SIZE 0x8c54 ++#define SQ_GSTMP_RING_SIZE 0x8c5c ++#define SQ_VSTMP_RING_SIZE 0x8c64 ++#define SQ_PSTMP_RING_SIZE 0x8c6c ++#define SQ_LSTMP_RING_SIZE 0x8e14 ++#define SQ_HSTMP_RING_SIZE 0x8e1c ++#define VGT_TF_RING_SIZE 0x8988 ++ ++#define SQ_ESGS_RING_ITEMSIZE 0x28900 ++#define SQ_GSVS_RING_ITEMSIZE 0x28904 ++#define SQ_ESTMP_RING_ITEMSIZE 0x28908 ++#define SQ_GSTMP_RING_ITEMSIZE 0x2890c ++#define SQ_VSTMP_RING_ITEMSIZE 
0x28910 ++#define SQ_PSTMP_RING_ITEMSIZE 0x28914 ++#define SQ_LSTMP_RING_ITEMSIZE 0x28830 ++#define SQ_HSTMP_RING_ITEMSIZE 0x28834 ++ ++#define SQ_GS_VERT_ITEMSIZE 0x2891c ++#define SQ_GS_VERT_ITEMSIZE_1 0x28920 ++#define SQ_GS_VERT_ITEMSIZE_2 0x28924 ++#define SQ_GS_VERT_ITEMSIZE_3 0x28928 ++#define SQ_GSVS_RING_OFFSET_1 0x2892c ++#define SQ_GSVS_RING_OFFSET_2 0x28930 ++#define SQ_GSVS_RING_OFFSET_3 0x28934 ++ ++#define SQ_ALU_CONST_CACHE_PS_0 0x28940 ++#define SQ_ALU_CONST_CACHE_PS_1 0x28944 ++#define SQ_ALU_CONST_CACHE_PS_2 0x28948 ++#define SQ_ALU_CONST_CACHE_PS_3 0x2894c ++#define SQ_ALU_CONST_CACHE_PS_4 0x28950 ++#define SQ_ALU_CONST_CACHE_PS_5 0x28954 ++#define SQ_ALU_CONST_CACHE_PS_6 0x28958 ++#define SQ_ALU_CONST_CACHE_PS_7 0x2895c ++#define SQ_ALU_CONST_CACHE_PS_8 0x28960 ++#define SQ_ALU_CONST_CACHE_PS_9 0x28964 ++#define SQ_ALU_CONST_CACHE_PS_10 0x28968 ++#define SQ_ALU_CONST_CACHE_PS_11 0x2896c ++#define SQ_ALU_CONST_CACHE_PS_12 0x28970 ++#define SQ_ALU_CONST_CACHE_PS_13 0x28974 ++#define SQ_ALU_CONST_CACHE_PS_14 0x28978 ++#define SQ_ALU_CONST_CACHE_PS_15 0x2897c ++#define SQ_ALU_CONST_CACHE_VS_0 0x28980 ++#define SQ_ALU_CONST_CACHE_VS_1 0x28984 ++#define SQ_ALU_CONST_CACHE_VS_2 0x28988 ++#define SQ_ALU_CONST_CACHE_VS_3 0x2898c ++#define SQ_ALU_CONST_CACHE_VS_4 0x28990 ++#define SQ_ALU_CONST_CACHE_VS_5 0x28994 ++#define SQ_ALU_CONST_CACHE_VS_6 0x28998 ++#define SQ_ALU_CONST_CACHE_VS_7 0x2899c ++#define SQ_ALU_CONST_CACHE_VS_8 0x289a0 ++#define SQ_ALU_CONST_CACHE_VS_9 0x289a4 ++#define SQ_ALU_CONST_CACHE_VS_10 0x289a8 ++#define SQ_ALU_CONST_CACHE_VS_11 0x289ac ++#define SQ_ALU_CONST_CACHE_VS_12 0x289b0 ++#define SQ_ALU_CONST_CACHE_VS_13 0x289b4 ++#define SQ_ALU_CONST_CACHE_VS_14 0x289b8 ++#define SQ_ALU_CONST_CACHE_VS_15 0x289bc ++#define SQ_ALU_CONST_CACHE_GS_0 0x289c0 ++#define SQ_ALU_CONST_CACHE_GS_1 0x289c4 ++#define SQ_ALU_CONST_CACHE_GS_2 0x289c8 ++#define SQ_ALU_CONST_CACHE_GS_3 0x289cc ++#define SQ_ALU_CONST_CACHE_GS_4 0x289d0 ++#define 
SQ_ALU_CONST_CACHE_GS_5 0x289d4 ++#define SQ_ALU_CONST_CACHE_GS_6 0x289d8 ++#define SQ_ALU_CONST_CACHE_GS_7 0x289dc ++#define SQ_ALU_CONST_CACHE_GS_8 0x289e0 ++#define SQ_ALU_CONST_CACHE_GS_9 0x289e4 ++#define SQ_ALU_CONST_CACHE_GS_10 0x289e8 ++#define SQ_ALU_CONST_CACHE_GS_11 0x289ec ++#define SQ_ALU_CONST_CACHE_GS_12 0x289f0 ++#define SQ_ALU_CONST_CACHE_GS_13 0x289f4 ++#define SQ_ALU_CONST_CACHE_GS_14 0x289f8 ++#define SQ_ALU_CONST_CACHE_GS_15 0x289fc ++#define SQ_ALU_CONST_CACHE_HS_0 0x28f00 ++#define SQ_ALU_CONST_CACHE_HS_1 0x28f04 ++#define SQ_ALU_CONST_CACHE_HS_2 0x28f08 ++#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c ++#define SQ_ALU_CONST_CACHE_HS_4 0x28f10 ++#define SQ_ALU_CONST_CACHE_HS_5 0x28f14 ++#define SQ_ALU_CONST_CACHE_HS_6 0x28f18 ++#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c ++#define SQ_ALU_CONST_CACHE_HS_8 0x28f20 ++#define SQ_ALU_CONST_CACHE_HS_9 0x28f24 ++#define SQ_ALU_CONST_CACHE_HS_10 0x28f28 ++#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c ++#define SQ_ALU_CONST_CACHE_HS_12 0x28f30 ++#define SQ_ALU_CONST_CACHE_HS_13 0x28f34 ++#define SQ_ALU_CONST_CACHE_HS_14 0x28f38 ++#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c ++#define SQ_ALU_CONST_CACHE_LS_0 0x28f40 ++#define SQ_ALU_CONST_CACHE_LS_1 0x28f44 ++#define SQ_ALU_CONST_CACHE_LS_2 0x28f48 ++#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c ++#define SQ_ALU_CONST_CACHE_LS_4 0x28f50 ++#define SQ_ALU_CONST_CACHE_LS_5 0x28f54 ++#define SQ_ALU_CONST_CACHE_LS_6 0x28f58 ++#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c ++#define SQ_ALU_CONST_CACHE_LS_8 0x28f60 ++#define SQ_ALU_CONST_CACHE_LS_9 0x28f64 ++#define SQ_ALU_CONST_CACHE_LS_10 0x28f68 ++#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c ++#define SQ_ALU_CONST_CACHE_LS_12 0x28f70 ++#define SQ_ALU_CONST_CACHE_LS_13 0x28f74 ++#define SQ_ALU_CONST_CACHE_LS_14 0x28f78 ++#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c ++ ++#define DB_DEPTH_CONTROL 0x28800 ++#define DB_DEPTH_VIEW 0x28008 ++#define DB_HTILE_DATA_BASE 0x28014 ++#define DB_Z_INFO 0x28040 ++# define Z_ARRAY_MODE(x) ((x) << 4) ++#define 
DB_STENCIL_INFO 0x28044 ++#define DB_Z_READ_BASE 0x28048 ++#define DB_STENCIL_READ_BASE 0x2804c ++#define DB_Z_WRITE_BASE 0x28050 ++#define DB_STENCIL_WRITE_BASE 0x28054 ++#define DB_DEPTH_SIZE 0x28058 ++ ++#define SQ_PGM_START_PS 0x28840 ++#define SQ_PGM_START_VS 0x2885c ++#define SQ_PGM_START_GS 0x28874 ++#define SQ_PGM_START_ES 0x2888c ++#define SQ_PGM_START_FS 0x288a4 ++#define SQ_PGM_START_HS 0x288b8 ++#define SQ_PGM_START_LS 0x288d0 ++ ++#define VGT_STRMOUT_CONFIG 0x28b94 ++#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 ++ ++#define CB_TARGET_MASK 0x28238 ++#define CB_SHADER_MASK 0x2823c ++ ++#define GDS_ADDR_BASE 0x28720 ++ ++#define CB_IMMED0_BASE 0x28b9c ++#define CB_IMMED1_BASE 0x28ba0 ++#define CB_IMMED2_BASE 0x28ba4 ++#define CB_IMMED3_BASE 0x28ba8 ++#define CB_IMMED4_BASE 0x28bac ++#define CB_IMMED5_BASE 0x28bb0 ++#define CB_IMMED6_BASE 0x28bb4 ++#define CB_IMMED7_BASE 0x28bb8 ++#define CB_IMMED8_BASE 0x28bbc ++#define CB_IMMED9_BASE 0x28bc0 ++#define CB_IMMED10_BASE 0x28bc4 ++#define CB_IMMED11_BASE 0x28bc8 ++ ++/* all 12 CB blocks have these regs */ ++#define CB_COLOR0_BASE 0x28c60 ++#define CB_COLOR0_PITCH 0x28c64 ++#define CB_COLOR0_SLICE 0x28c68 ++#define CB_COLOR0_VIEW 0x28c6c ++#define CB_COLOR0_INFO 0x28c70 ++# define CB_ARRAY_MODE(x) ((x) << 8) ++# define ARRAY_LINEAR_GENERAL 0 ++# define ARRAY_LINEAR_ALIGNED 1 ++# define ARRAY_1D_TILED_THIN1 2 ++# define ARRAY_2D_TILED_THIN1 4 ++#define CB_COLOR0_ATTRIB 0x28c74 ++#define CB_COLOR0_DIM 0x28c78 ++/* only CB0-7 blocks have these regs */ ++#define CB_COLOR0_CMASK 0x28c7c ++#define CB_COLOR0_CMASK_SLICE 0x28c80 ++#define CB_COLOR0_FMASK 0x28c84 ++#define CB_COLOR0_FMASK_SLICE 0x28c88 ++#define CB_COLOR0_CLEAR_WORD0 0x28c8c ++#define CB_COLOR0_CLEAR_WORD1 0x28c90 ++#define CB_COLOR0_CLEAR_WORD2 0x28c94 ++#define CB_COLOR0_CLEAR_WORD3 0x28c98 ++ ++#define CB_COLOR1_BASE 0x28c9c ++#define CB_COLOR2_BASE 0x28cd8 ++#define CB_COLOR3_BASE 0x28d14 ++#define CB_COLOR4_BASE 0x28d50 ++#define CB_COLOR5_BASE 
0x28d8c ++#define CB_COLOR6_BASE 0x28dc8 ++#define CB_COLOR7_BASE 0x28e04 ++#define CB_COLOR8_BASE 0x28e40 ++#define CB_COLOR9_BASE 0x28e5c ++#define CB_COLOR10_BASE 0x28e78 ++#define CB_COLOR11_BASE 0x28e94 ++ ++#define CB_COLOR1_PITCH 0x28ca0 ++#define CB_COLOR2_PITCH 0x28cdc ++#define CB_COLOR3_PITCH 0x28d18 ++#define CB_COLOR4_PITCH 0x28d54 ++#define CB_COLOR5_PITCH 0x28d90 ++#define CB_COLOR6_PITCH 0x28dcc ++#define CB_COLOR7_PITCH 0x28e08 ++#define CB_COLOR8_PITCH 0x28e44 ++#define CB_COLOR9_PITCH 0x28e60 ++#define CB_COLOR10_PITCH 0x28e7c ++#define CB_COLOR11_PITCH 0x28e98 ++ ++#define CB_COLOR1_SLICE 0x28ca4 ++#define CB_COLOR2_SLICE 0x28ce0 ++#define CB_COLOR3_SLICE 0x28d1c ++#define CB_COLOR4_SLICE 0x28d58 ++#define CB_COLOR5_SLICE 0x28d94 ++#define CB_COLOR6_SLICE 0x28dd0 ++#define CB_COLOR7_SLICE 0x28e0c ++#define CB_COLOR8_SLICE 0x28e48 ++#define CB_COLOR9_SLICE 0x28e64 ++#define CB_COLOR10_SLICE 0x28e80 ++#define CB_COLOR11_SLICE 0x28e9c ++ ++#define CB_COLOR1_VIEW 0x28ca8 ++#define CB_COLOR2_VIEW 0x28ce4 ++#define CB_COLOR3_VIEW 0x28d20 ++#define CB_COLOR4_VIEW 0x28d5c ++#define CB_COLOR5_VIEW 0x28d98 ++#define CB_COLOR6_VIEW 0x28dd4 ++#define CB_COLOR7_VIEW 0x28e10 ++#define CB_COLOR8_VIEW 0x28e4c ++#define CB_COLOR9_VIEW 0x28e68 ++#define CB_COLOR10_VIEW 0x28e84 ++#define CB_COLOR11_VIEW 0x28ea0 ++ ++#define CB_COLOR1_INFO 0x28cac ++#define CB_COLOR2_INFO 0x28ce8 ++#define CB_COLOR3_INFO 0x28d24 ++#define CB_COLOR4_INFO 0x28d60 ++#define CB_COLOR5_INFO 0x28d9c ++#define CB_COLOR6_INFO 0x28dd8 ++#define CB_COLOR7_INFO 0x28e14 ++#define CB_COLOR8_INFO 0x28e50 ++#define CB_COLOR9_INFO 0x28e6c ++#define CB_COLOR10_INFO 0x28e88 ++#define CB_COLOR11_INFO 0x28ea4 ++ ++#define CB_COLOR1_ATTRIB 0x28cb0 ++#define CB_COLOR2_ATTRIB 0x28cec ++#define CB_COLOR3_ATTRIB 0x28d28 ++#define CB_COLOR4_ATTRIB 0x28d64 ++#define CB_COLOR5_ATTRIB 0x28da0 ++#define CB_COLOR6_ATTRIB 0x28ddc ++#define CB_COLOR7_ATTRIB 0x28e18 ++#define CB_COLOR8_ATTRIB 0x28e54 ++#define 
CB_COLOR9_ATTRIB 0x28e70 ++#define CB_COLOR10_ATTRIB 0x28e8c ++#define CB_COLOR11_ATTRIB 0x28ea8 ++ ++#define CB_COLOR1_DIM 0x28cb4 ++#define CB_COLOR2_DIM 0x28cf0 ++#define CB_COLOR3_DIM 0x28d2c ++#define CB_COLOR4_DIM 0x28d68 ++#define CB_COLOR5_DIM 0x28da4 ++#define CB_COLOR6_DIM 0x28de0 ++#define CB_COLOR7_DIM 0x28e1c ++#define CB_COLOR8_DIM 0x28e58 ++#define CB_COLOR9_DIM 0x28e74 ++#define CB_COLOR10_DIM 0x28e90 ++#define CB_COLOR11_DIM 0x28eac ++ ++#define CB_COLOR1_CMASK 0x28cb8 ++#define CB_COLOR2_CMASK 0x28cf4 ++#define CB_COLOR3_CMASK 0x28d30 ++#define CB_COLOR4_CMASK 0x28d6c ++#define CB_COLOR5_CMASK 0x28da8 ++#define CB_COLOR6_CMASK 0x28de4 ++#define CB_COLOR7_CMASK 0x28e20 ++ ++#define CB_COLOR1_CMASK_SLICE 0x28cbc ++#define CB_COLOR2_CMASK_SLICE 0x28cf8 ++#define CB_COLOR3_CMASK_SLICE 0x28d34 ++#define CB_COLOR4_CMASK_SLICE 0x28d70 ++#define CB_COLOR5_CMASK_SLICE 0x28dac ++#define CB_COLOR6_CMASK_SLICE 0x28de8 ++#define CB_COLOR7_CMASK_SLICE 0x28e24 ++ ++#define CB_COLOR1_FMASK 0x28cc0 ++#define CB_COLOR2_FMASK 0x28cfc ++#define CB_COLOR3_FMASK 0x28d38 ++#define CB_COLOR4_FMASK 0x28d74 ++#define CB_COLOR5_FMASK 0x28db0 ++#define CB_COLOR6_FMASK 0x28dec ++#define CB_COLOR7_FMASK 0x28e28 ++ ++#define CB_COLOR1_FMASK_SLICE 0x28cc4 ++#define CB_COLOR2_FMASK_SLICE 0x28d00 ++#define CB_COLOR3_FMASK_SLICE 0x28d3c ++#define CB_COLOR4_FMASK_SLICE 0x28d78 ++#define CB_COLOR5_FMASK_SLICE 0x28db4 ++#define CB_COLOR6_FMASK_SLICE 0x28df0 ++#define CB_COLOR7_FMASK_SLICE 0x28e2c ++ ++#define CB_COLOR1_CLEAR_WORD0 0x28cc8 ++#define CB_COLOR2_CLEAR_WORD0 0x28d04 ++#define CB_COLOR3_CLEAR_WORD0 0x28d40 ++#define CB_COLOR4_CLEAR_WORD0 0x28d7c ++#define CB_COLOR5_CLEAR_WORD0 0x28db8 ++#define CB_COLOR6_CLEAR_WORD0 0x28df4 ++#define CB_COLOR7_CLEAR_WORD0 0x28e30 ++ ++#define CB_COLOR1_CLEAR_WORD1 0x28ccc ++#define CB_COLOR2_CLEAR_WORD1 0x28d08 ++#define CB_COLOR3_CLEAR_WORD1 0x28d44 ++#define CB_COLOR4_CLEAR_WORD1 0x28d80 ++#define CB_COLOR5_CLEAR_WORD1 0x28dbc ++#define 
CB_COLOR6_CLEAR_WORD1 0x28df8 ++#define CB_COLOR7_CLEAR_WORD1 0x28e34 ++ ++#define CB_COLOR1_CLEAR_WORD2 0x28cd0 ++#define CB_COLOR2_CLEAR_WORD2 0x28d0c ++#define CB_COLOR3_CLEAR_WORD2 0x28d48 ++#define CB_COLOR4_CLEAR_WORD2 0x28d84 ++#define CB_COLOR5_CLEAR_WORD2 0x28dc0 ++#define CB_COLOR6_CLEAR_WORD2 0x28dfc ++#define CB_COLOR7_CLEAR_WORD2 0x28e38 ++ ++#define CB_COLOR1_CLEAR_WORD3 0x28cd4 ++#define CB_COLOR2_CLEAR_WORD3 0x28d10 ++#define CB_COLOR3_CLEAR_WORD3 0x28d4c ++#define CB_COLOR4_CLEAR_WORD3 0x28d88 ++#define CB_COLOR5_CLEAR_WORD3 0x28dc4 ++#define CB_COLOR6_CLEAR_WORD3 0x28e00 ++#define CB_COLOR7_CLEAR_WORD3 0x28e3c ++ ++#define SQ_TEX_RESOURCE_WORD0_0 0x30000 ++#define SQ_TEX_RESOURCE_WORD1_0 0x30004 ++# define TEX_ARRAY_MODE(x) ((x) << 28) ++#define SQ_TEX_RESOURCE_WORD2_0 0x30008 ++#define SQ_TEX_RESOURCE_WORD3_0 0x3000C ++#define SQ_TEX_RESOURCE_WORD4_0 0x30010 ++#define SQ_TEX_RESOURCE_WORD5_0 0x30014 ++#define SQ_TEX_RESOURCE_WORD6_0 0x30018 ++#define SQ_TEX_RESOURCE_WORD7_0 0x3001c ++ ++ ++#endif +diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c +index cf60c0b..cf89aa2 100644 +--- a/drivers/gpu/drm/radeon/r100.c ++++ b/drivers/gpu/drm/radeon/r100.c +@@ -37,6 +37,7 @@ + #include "rs100d.h" + #include "rv200d.h" + #include "rv250d.h" ++#include "atom.h" + + #include + #include +@@ -67,6 +68,274 @@ MODULE_FIRMWARE(FIRMWARE_R520); + * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 + */ + ++void r100_pm_get_dynpm_state(struct radeon_device *rdev) ++{ ++ int i; ++ rdev->pm.dynpm_can_upclock = true; ++ rdev->pm.dynpm_can_downclock = true; ++ ++ switch (rdev->pm.dynpm_planned_action) { ++ case DYNPM_ACTION_MINIMUM: ++ rdev->pm.requested_power_state_index = 0; ++ rdev->pm.dynpm_can_downclock = false; ++ break; ++ case DYNPM_ACTION_DOWNCLOCK: ++ if (rdev->pm.current_power_state_index == 0) { ++ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; ++ rdev->pm.dynpm_can_downclock = false; ++ } else { ++ if 
(rdev->pm.active_crtc_count > 1) { ++ for (i = 0; i < rdev->pm.num_power_states; i++) { ++ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) ++ continue; ++ else if (i >= rdev->pm.current_power_state_index) { ++ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; ++ break; ++ } else { ++ rdev->pm.requested_power_state_index = i; ++ break; ++ } ++ } ++ } else ++ rdev->pm.requested_power_state_index = ++ rdev->pm.current_power_state_index - 1; ++ } ++ /* don't use the power state if crtcs are active and no display flag is set */ ++ if ((rdev->pm.active_crtc_count > 0) && ++ (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags & ++ RADEON_PM_MODE_NO_DISPLAY)) { ++ rdev->pm.requested_power_state_index++; ++ } ++ break; ++ case DYNPM_ACTION_UPCLOCK: ++ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { ++ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; ++ rdev->pm.dynpm_can_upclock = false; ++ } else { ++ if (rdev->pm.active_crtc_count > 1) { ++ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { ++ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) ++ continue; ++ else if (i <= rdev->pm.current_power_state_index) { ++ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; ++ break; ++ } else { ++ rdev->pm.requested_power_state_index = i; ++ break; ++ } ++ } ++ } else ++ rdev->pm.requested_power_state_index = ++ rdev->pm.current_power_state_index + 1; ++ } ++ break; ++ case DYNPM_ACTION_DEFAULT: ++ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; ++ rdev->pm.dynpm_can_upclock = false; ++ break; ++ case DYNPM_ACTION_NONE: ++ default: ++ DRM_ERROR("Requested mode for not defined action\n"); ++ return; ++ } ++ /* only one clock mode per power state */ ++ rdev->pm.requested_clock_mode_index = 0; ++ ++ DRM_DEBUG("Requested: e: %d m: %d p: %d\n", ++ 
rdev->pm.power_state[rdev->pm.requested_power_state_index]. ++ clock_info[rdev->pm.requested_clock_mode_index].sclk, ++ rdev->pm.power_state[rdev->pm.requested_power_state_index]. ++ clock_info[rdev->pm.requested_clock_mode_index].mclk, ++ rdev->pm.power_state[rdev->pm.requested_power_state_index]. ++ pcie_lanes); ++} ++ ++void r100_pm_init_profile(struct radeon_device *rdev) ++{ ++ /* default */ ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; ++ /* low sh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ /* mid sh */ ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; ++ /* high sh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; ++ /* low mh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ /* mid mh */ ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; ++ 
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; ++ /* high mh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; ++} ++ ++void r100_pm_misc(struct radeon_device *rdev) ++{ ++ int requested_index = rdev->pm.requested_power_state_index; ++ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; ++ struct radeon_voltage *voltage = &ps->clock_info[0].voltage; ++ u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl; ++ ++ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { ++ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { ++ tmp = RREG32(voltage->gpio.reg); ++ if (voltage->active_high) ++ tmp |= voltage->gpio.mask; ++ else ++ tmp &= ~(voltage->gpio.mask); ++ WREG32(voltage->gpio.reg, tmp); ++ if (voltage->delay) ++ udelay(voltage->delay); ++ } else { ++ tmp = RREG32(voltage->gpio.reg); ++ if (voltage->active_high) ++ tmp &= ~voltage->gpio.mask; ++ else ++ tmp |= voltage->gpio.mask; ++ WREG32(voltage->gpio.reg, tmp); ++ if (voltage->delay) ++ udelay(voltage->delay); ++ } ++ } ++ ++ sclk_cntl = RREG32_PLL(SCLK_CNTL); ++ sclk_cntl2 = RREG32_PLL(SCLK_CNTL2); ++ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3); ++ sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL); ++ sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3); ++ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { ++ sclk_more_cntl |= REDUCED_SPEED_SCLK_EN; ++ if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE) ++ sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE; ++ else ++ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE; ++ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) ++ sclk_cntl2 |= 
REDUCED_SPEED_SCLK_SEL(0); ++ else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) ++ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2); ++ } else ++ sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN; ++ ++ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { ++ sclk_more_cntl |= IO_CG_VOLTAGE_DROP; ++ if (voltage->delay) { ++ sclk_more_cntl |= VOLTAGE_DROP_SYNC; ++ switch (voltage->delay) { ++ case 33: ++ sclk_more_cntl |= VOLTAGE_DELAY_SEL(0); ++ break; ++ case 66: ++ sclk_more_cntl |= VOLTAGE_DELAY_SEL(1); ++ break; ++ case 99: ++ sclk_more_cntl |= VOLTAGE_DELAY_SEL(2); ++ break; ++ case 132: ++ sclk_more_cntl |= VOLTAGE_DELAY_SEL(3); ++ break; ++ } ++ } else ++ sclk_more_cntl &= ~VOLTAGE_DROP_SYNC; ++ } else ++ sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP; ++ ++ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) ++ sclk_cntl &= ~FORCE_HDP; ++ else ++ sclk_cntl |= FORCE_HDP; ++ ++ WREG32_PLL(SCLK_CNTL, sclk_cntl); ++ WREG32_PLL(SCLK_CNTL2, sclk_cntl2); ++ WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl); ++ ++ /* set pcie lanes */ ++ if ((rdev->flags & RADEON_IS_PCIE) && ++ !(rdev->flags & RADEON_IS_IGP) && ++ rdev->asic->set_pcie_lanes && ++ (ps->pcie_lanes != ++ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { ++ radeon_set_pcie_lanes(rdev, ++ ps->pcie_lanes); ++ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); ++ } ++} ++ ++void r100_pm_prepare(struct radeon_device *rdev) ++{ ++ struct drm_device *ddev = rdev->ddev; ++ struct drm_crtc *crtc; ++ struct radeon_crtc *radeon_crtc; ++ u32 tmp; ++ ++ /* disable any active CRTCs */ ++ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { ++ radeon_crtc = to_radeon_crtc(crtc); ++ if (radeon_crtc->enabled) { ++ if (radeon_crtc->crtc_id) { ++ tmp = RREG32(RADEON_CRTC2_GEN_CNTL); ++ tmp |= RADEON_CRTC2_DISP_REQ_EN_B; ++ WREG32(RADEON_CRTC2_GEN_CNTL, tmp); ++ } else { ++ tmp = RREG32(RADEON_CRTC_GEN_CNTL); ++ tmp |= RADEON_CRTC_DISP_REQ_EN_B; ++ WREG32(RADEON_CRTC_GEN_CNTL, tmp); ++ } ++ } ++ } ++} ++ 
++void r100_pm_finish(struct radeon_device *rdev) ++{ ++ struct drm_device *ddev = rdev->ddev; ++ struct drm_crtc *crtc; ++ struct radeon_crtc *radeon_crtc; ++ u32 tmp; ++ ++ /* enable any active CRTCs */ ++ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { ++ radeon_crtc = to_radeon_crtc(crtc); ++ if (radeon_crtc->enabled) { ++ if (radeon_crtc->crtc_id) { ++ tmp = RREG32(RADEON_CRTC2_GEN_CNTL); ++ tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B; ++ WREG32(RADEON_CRTC2_GEN_CNTL, tmp); ++ } else { ++ tmp = RREG32(RADEON_CRTC_GEN_CNTL); ++ tmp &= ~RADEON_CRTC_DISP_REQ_EN_B; ++ WREG32(RADEON_CRTC_GEN_CNTL, tmp); ++ } ++ } ++ } ++} ++ ++bool r100_gui_idle(struct radeon_device *rdev) ++{ ++ if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) ++ return false; ++ else ++ return true; ++} ++ + /* hpd for digital panel detect/disconnect */ + bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) + { +@@ -254,6 +523,9 @@ int r100_irq_set(struct radeon_device *rdev) + if (rdev->irq.sw_int) { + tmp |= RADEON_SW_INT_ENABLE; + } ++ if (rdev->irq.gui_idle) { ++ tmp |= RADEON_GUI_IDLE_MASK; ++ } + if (rdev->irq.crtc_vblank_int[0]) { + tmp |= RADEON_CRTC_VBLANK_MASK; + } +@@ -288,6 +560,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev) + RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | + RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; + ++ /* the interrupt works, but the status bit is permanently asserted */ ++ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { ++ if (!rdev->irq.gui_idle_acked) ++ irq_mask |= RADEON_GUI_IDLE_STAT; ++ } ++ + if (irqs) { + WREG32(RADEON_GEN_INT_STATUS, irqs); + } +@@ -299,6 +577,9 @@ int r100_irq_process(struct radeon_device *rdev) + uint32_t status, msi_rearm; + bool queue_hotplug = false; + ++ /* reset gui idle ack. 
the status bit is broken */ ++ rdev->irq.gui_idle_acked = false; ++ + status = r100_irq_ack(rdev); + if (!status) { + return IRQ_NONE; +@@ -311,6 +592,12 @@ int r100_irq_process(struct radeon_device *rdev) + if (status & RADEON_SW_INT_TEST) { + radeon_fence_process(rdev); + } ++ /* gui idle interrupt */ ++ if (status & RADEON_GUI_IDLE_STAT) { ++ rdev->irq.gui_idle_acked = true; ++ rdev->pm.gui_idle = true; ++ wake_up(&rdev->irq.idle_queue); ++ } + /* Vertical blank interrupts */ + if (status & RADEON_CRTC_VBLANK_STAT) { + drm_handle_vblank(rdev->ddev, 0); +@@ -332,6 +619,8 @@ int r100_irq_process(struct radeon_device *rdev) + } + status = r100_irq_ack(rdev); + } ++ /* reset gui idle ack. the status bit is broken */ ++ rdev->irq.gui_idle_acked = false; + if (queue_hotplug) + queue_work(rdev->wq, &rdev->hotplug_work); + if (rdev->msi_enabled) { +@@ -663,26 +952,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) + if (r100_debugfs_cp_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for CP !\n"); + } +- /* Reset CP */ +- tmp = RREG32(RADEON_CP_CSQ_STAT); +- if ((tmp & (1 << 31))) { +- DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp); +- WREG32(RADEON_CP_CSQ_MODE, 0); +- WREG32(RADEON_CP_CSQ_CNTL, 0); +- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); +- tmp = RREG32(RADEON_RBBM_SOFT_RESET); +- mdelay(2); +- WREG32(RADEON_RBBM_SOFT_RESET, 0); +- tmp = RREG32(RADEON_RBBM_SOFT_RESET); +- mdelay(2); +- tmp = RREG32(RADEON_CP_CSQ_STAT); +- if ((tmp & (1 << 31))) { +- DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp); +- } +- } else { +- DRM_INFO("radeon: cp idle (0x%08X)\n", tmp); +- } +- + if (!rdev->me_fw) { + r = r100_cp_init_microcode(rdev); + if (r) { +@@ -787,39 +1056,6 @@ void r100_cp_disable(struct radeon_device *rdev) + } + } + +-int r100_cp_reset(struct radeon_device *rdev) +-{ +- uint32_t tmp; +- bool reinit_cp; +- int i; +- +- reinit_cp = rdev->cp.ready; +- rdev->cp.ready = false; +- WREG32(RADEON_CP_CSQ_MODE, 0); +- 
WREG32(RADEON_CP_CSQ_CNTL, 0); +- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); +- (void)RREG32(RADEON_RBBM_SOFT_RESET); +- udelay(200); +- WREG32(RADEON_RBBM_SOFT_RESET, 0); +- /* Wait to prevent race in RBBM_STATUS */ +- mdelay(1); +- for (i = 0; i < rdev->usec_timeout; i++) { +- tmp = RREG32(RADEON_RBBM_STATUS); +- if (!(tmp & (1 << 16))) { +- DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n", +- tmp); +- if (reinit_cp) { +- return r100_cp_init(rdev, rdev->cp.ring_size); +- } +- return 0; +- } +- DRM_UDELAY(1); +- } +- tmp = RREG32(RADEON_RBBM_STATUS); +- DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp); +- return -1; +-} +- + void r100_cp_commit(struct radeon_device *rdev) + { + WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); +@@ -1733,76 +1969,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev) + return -1; + } + +-void r100_gpu_init(struct radeon_device *rdev) ++void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp) + { +- /* TODO: anythings to do here ? pipes ? */ +- r100_hdp_reset(rdev); ++ lockup->last_cp_rptr = cp->rptr; ++ lockup->last_jiffies = jiffies; ++} ++ ++/** ++ * r100_gpu_cp_is_lockup() - check if CP is lockup by recording information ++ * @rdev: radeon device structure ++ * @lockup: r100_gpu_lockup structure holding CP lockup tracking informations ++ * @cp: radeon_cp structure holding CP information ++ * ++ * We don't need to initialize the lockup tracking information as we will either ++ * have CP rptr to a different value of jiffies wrap around which will force ++ * initialization of the lockup tracking informations. ++ * ++ * A possible false positivie is if we get call after while and last_cp_rptr == ++ * the current CP rptr, even if it's unlikely it might happen. To avoid this ++ * if the elapsed time since last call is bigger than 2 second than we return ++ * false and update the tracking information. 
Due to this the caller must call ++ * r100_gpu_cp_is_lockup several time in less than 2sec for lockup to be reported ++ * the fencing code should be cautious about that. ++ * ++ * Caller should write to the ring to force CP to do something so we don't get ++ * false positive when CP is just gived nothing to do. ++ * ++ **/ ++bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp) ++{ ++ unsigned long cjiffies, elapsed; ++ ++ cjiffies = jiffies; ++ if (!time_after(cjiffies, lockup->last_jiffies)) { ++ /* likely a wrap around */ ++ lockup->last_cp_rptr = cp->rptr; ++ lockup->last_jiffies = jiffies; ++ return false; ++ } ++ if (cp->rptr != lockup->last_cp_rptr) { ++ /* CP is still working no lockup */ ++ lockup->last_cp_rptr = cp->rptr; ++ lockup->last_jiffies = jiffies; ++ return false; ++ } ++ elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); ++ if (elapsed >= 3000) { ++ /* very likely the improbable case where current ++ * rptr is equal to last recorded, a while ago, rptr ++ * this is more likely a false positive update tracking ++ * information which should force us to be recall at ++ * latter point ++ */ ++ lockup->last_cp_rptr = cp->rptr; ++ lockup->last_jiffies = jiffies; ++ return false; ++ } ++ if (elapsed >= 1000) { ++ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); ++ return true; ++ } ++ /* give a chance to the GPU ... 
*/ ++ return false; + } + +-void r100_hdp_reset(struct radeon_device *rdev) ++bool r100_gpu_is_lockup(struct radeon_device *rdev) + { +- uint32_t tmp; ++ u32 rbbm_status; ++ int r; + +- tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; +- tmp |= (7 << 28); +- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); +- (void)RREG32(RADEON_HOST_PATH_CNTL); +- udelay(200); +- WREG32(RADEON_RBBM_SOFT_RESET, 0); +- WREG32(RADEON_HOST_PATH_CNTL, tmp); +- (void)RREG32(RADEON_HOST_PATH_CNTL); ++ rbbm_status = RREG32(R_000E40_RBBM_STATUS); ++ if (!G_000E40_GUI_ACTIVE(rbbm_status)) { ++ r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp); ++ return false; ++ } ++ /* force CP activities */ ++ r = radeon_ring_lock(rdev, 2); ++ if (!r) { ++ /* PACKET2 NOP */ ++ radeon_ring_write(rdev, 0x80000000); ++ radeon_ring_write(rdev, 0x80000000); ++ radeon_ring_unlock_commit(rdev); ++ } ++ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); ++ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp); + } + +-int r100_rb2d_reset(struct radeon_device *rdev) ++void r100_bm_disable(struct radeon_device *rdev) + { +- uint32_t tmp; +- int i; ++ u32 tmp; + +- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); +- (void)RREG32(RADEON_RBBM_SOFT_RESET); +- udelay(200); +- WREG32(RADEON_RBBM_SOFT_RESET, 0); +- /* Wait to prevent race in RBBM_STATUS */ ++ /* disable bus mastering */ ++ tmp = RREG32(R_000030_BUS_CNTL); ++ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); ++ mdelay(1); ++ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); ++ mdelay(1); ++ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); ++ tmp = RREG32(RADEON_BUS_CNTL); ++ mdelay(1); ++ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); ++ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); + mdelay(1); +- for (i = 0; i < rdev->usec_timeout; i++) { +- tmp = RREG32(RADEON_RBBM_STATUS); +- if (!(tmp & (1 << 26))) { +- DRM_INFO("RB2D reset 
succeed (RBBM_STATUS=0x%08X)\n", +- tmp); +- return 0; +- } +- DRM_UDELAY(1); +- } +- tmp = RREG32(RADEON_RBBM_STATUS); +- DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp); +- return -1; + } + +-int r100_gpu_reset(struct radeon_device *rdev) ++int r100_asic_reset(struct radeon_device *rdev) + { +- uint32_t status; ++ struct r100_mc_save save; ++ u32 status, tmp; + +- /* reset order likely matter */ +- status = RREG32(RADEON_RBBM_STATUS); +- /* reset HDP */ +- r100_hdp_reset(rdev); +- /* reset rb2d */ +- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { +- r100_rb2d_reset(rdev); ++ r100_mc_stop(rdev, &save); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ if (!G_000E40_GUI_ACTIVE(status)) { ++ return 0; + } +- /* TODO: reset 3D engine */ ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* stop CP */ ++ WREG32(RADEON_CP_CSQ_CNTL, 0); ++ tmp = RREG32(RADEON_CP_RB_CNTL); ++ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); ++ WREG32(RADEON_CP_RB_RPTR_WR, 0); ++ WREG32(RADEON_CP_RB_WPTR, 0); ++ WREG32(RADEON_CP_RB_CNTL, tmp); ++ /* save PCI state */ ++ pci_save_state(rdev->pdev); ++ /* disable bus mastering */ ++ r100_bm_disable(rdev); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | ++ S_0000F0_SOFT_RESET_RE(1) | ++ S_0000F0_SOFT_RESET_PP(1) | ++ S_0000F0_SOFT_RESET_RB(1)); ++ RREG32(R_0000F0_RBBM_SOFT_RESET); ++ mdelay(500); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); ++ mdelay(1); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* reset CP */ +- status = RREG32(RADEON_RBBM_STATUS); +- if (status & (1 << 16)) { +- r100_cp_reset(rdev); +- } ++ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); ++ RREG32(R_0000F0_RBBM_SOFT_RESET); ++ mdelay(500); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); ++ mdelay(1); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) 
RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* restore PCI & busmastering */ ++ pci_restore_state(rdev->pdev); ++ r100_enable_bm(rdev); + /* Check if GPU is idle */ +- status = RREG32(RADEON_RBBM_STATUS); +- if (status & RADEON_RBBM_ACTIVE) { +- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); ++ if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || ++ G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { ++ dev_err(rdev->dev, "failed to reset GPU\n"); ++ rdev->gpu_lockup = true; + return -1; + } +- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); ++ r100_mc_resume(rdev, &save); ++ dev_info(rdev->dev, "GPU reset succeed\n"); + return 0; + } + +@@ -2002,11 +2325,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev) + else + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; + } +- /* FIXME remove this once we support unmappable VRAM */ +- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { +- rdev->mc.mc_vram_size = rdev->mc.aper_size; +- rdev->mc.real_vram_size = rdev->mc.aper_size; +- } + } + + void r100_vga_set_state(struct radeon_device *rdev, bool state) +@@ -2335,53 +2653,53 @@ void r100_bandwidth_update(struct radeon_device *rdev) + fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; + uint32_t temp, data, mem_trcd, mem_trp, mem_tras; + fixed20_12 memtcas_ff[8] = { +- fixed_init(1), +- fixed_init(2), +- fixed_init(3), +- fixed_init(0), +- fixed_init_half(1), +- fixed_init_half(2), +- fixed_init(0), ++ dfixed_init(1), ++ dfixed_init(2), ++ dfixed_init(3), ++ dfixed_init(0), ++ dfixed_init_half(1), ++ dfixed_init_half(2), ++ dfixed_init(0), + }; + fixed20_12 memtcas_rs480_ff[8] = { +- fixed_init(0), +- fixed_init(1), +- fixed_init(2), +- fixed_init(3), +- fixed_init(0), +- fixed_init_half(1), +- fixed_init_half(2), +- fixed_init_half(3), ++ dfixed_init(0), ++ dfixed_init(1), ++ dfixed_init(2), ++ dfixed_init(3), ++ dfixed_init(0), ++ dfixed_init_half(1), ++ dfixed_init_half(2), ++ 
dfixed_init_half(3), + }; + fixed20_12 memtcas2_ff[8] = { +- fixed_init(0), +- fixed_init(1), +- fixed_init(2), +- fixed_init(3), +- fixed_init(4), +- fixed_init(5), +- fixed_init(6), +- fixed_init(7), ++ dfixed_init(0), ++ dfixed_init(1), ++ dfixed_init(2), ++ dfixed_init(3), ++ dfixed_init(4), ++ dfixed_init(5), ++ dfixed_init(6), ++ dfixed_init(7), + }; + fixed20_12 memtrbs[8] = { +- fixed_init(1), +- fixed_init_half(1), +- fixed_init(2), +- fixed_init_half(2), +- fixed_init(3), +- fixed_init_half(3), +- fixed_init(4), +- fixed_init_half(4) ++ dfixed_init(1), ++ dfixed_init_half(1), ++ dfixed_init(2), ++ dfixed_init_half(2), ++ dfixed_init(3), ++ dfixed_init_half(3), ++ dfixed_init(4), ++ dfixed_init_half(4) + }; + fixed20_12 memtrbs_r4xx[8] = { +- fixed_init(4), +- fixed_init(5), +- fixed_init(6), +- fixed_init(7), +- fixed_init(8), +- fixed_init(9), +- fixed_init(10), +- fixed_init(11) ++ dfixed_init(4), ++ dfixed_init(5), ++ dfixed_init(6), ++ dfixed_init(7), ++ dfixed_init(8), ++ dfixed_init(9), ++ dfixed_init(10), ++ dfixed_init(11) + }; + fixed20_12 min_mem_eff; + fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; +@@ -2412,7 +2730,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) + } + } + +- min_mem_eff.full = rfixed_const_8(0); ++ min_mem_eff.full = dfixed_const_8(0); + /* get modes */ + if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { + uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); +@@ -2433,28 +2751,28 @@ void r100_bandwidth_update(struct radeon_device *rdev) + mclk_ff = rdev->pm.mclk; + + temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 
2 : 1); +- temp_ff.full = rfixed_const(temp); +- mem_bw.full = rfixed_mul(mclk_ff, temp_ff); ++ temp_ff.full = dfixed_const(temp); ++ mem_bw.full = dfixed_mul(mclk_ff, temp_ff); + + pix_clk.full = 0; + pix_clk2.full = 0; + peak_disp_bw.full = 0; + if (mode1) { +- temp_ff.full = rfixed_const(1000); +- pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ +- pix_clk.full = rfixed_div(pix_clk, temp_ff); +- temp_ff.full = rfixed_const(pixel_bytes1); +- peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); ++ temp_ff.full = dfixed_const(1000); ++ pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ ++ pix_clk.full = dfixed_div(pix_clk, temp_ff); ++ temp_ff.full = dfixed_const(pixel_bytes1); ++ peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); + } + if (mode2) { +- temp_ff.full = rfixed_const(1000); +- pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ +- pix_clk2.full = rfixed_div(pix_clk2, temp_ff); +- temp_ff.full = rfixed_const(pixel_bytes2); +- peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); ++ temp_ff.full = dfixed_const(1000); ++ pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ ++ pix_clk2.full = dfixed_div(pix_clk2, temp_ff); ++ temp_ff.full = dfixed_const(pixel_bytes2); ++ peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); + } + +- mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); ++ mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); + if (peak_disp_bw.full >= mem_bw.full) { + DRM_ERROR("You may not have enough display bandwidth for current mode\n" + "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); +@@ -2496,9 +2814,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) + mem_tras = ((temp >> 12) & 0xf) + 4; + } + /* convert to FF */ +- trcd_ff.full = rfixed_const(mem_trcd); +- trp_ff.full = rfixed_const(mem_trp); +- tras_ff.full = rfixed_const(mem_tras); ++ trcd_ff.full = dfixed_const(mem_trcd); ++ trp_ff.full = dfixed_const(mem_trp); 
++ tras_ff.full = dfixed_const(mem_tras); + + /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ + temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); +@@ -2516,7 +2834,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) + /* extra cas latency stored in bits 23-25 0-4 clocks */ + data = (temp >> 23) & 0x7; + if (data < 5) +- tcas_ff.full += rfixed_const(data); ++ tcas_ff.full += dfixed_const(data); + } + + if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { +@@ -2553,72 +2871,72 @@ void r100_bandwidth_update(struct radeon_device *rdev) + + if (rdev->flags & RADEON_IS_AGP) { + fixed20_12 agpmode_ff; +- agpmode_ff.full = rfixed_const(radeon_agpmode); +- temp_ff.full = rfixed_const_666(16); +- sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); ++ agpmode_ff.full = dfixed_const(radeon_agpmode); ++ temp_ff.full = dfixed_const_666(16); ++ sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); + } + /* TODO PCIE lanes may affect this - agpmode == 16?? */ + + if (ASIC_IS_R300(rdev)) { +- sclk_delay_ff.full = rfixed_const(250); ++ sclk_delay_ff.full = dfixed_const(250); + } else { + if ((rdev->family == CHIP_RV100) || + rdev->flags & RADEON_IS_IGP) { + if (rdev->mc.vram_is_ddr) +- sclk_delay_ff.full = rfixed_const(41); ++ sclk_delay_ff.full = dfixed_const(41); + else +- sclk_delay_ff.full = rfixed_const(33); ++ sclk_delay_ff.full = dfixed_const(33); + } else { + if (rdev->mc.vram_width == 128) +- sclk_delay_ff.full = rfixed_const(57); ++ sclk_delay_ff.full = dfixed_const(57); + else +- sclk_delay_ff.full = rfixed_const(41); ++ sclk_delay_ff.full = dfixed_const(41); + } + } + +- mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); ++ mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); + + if (rdev->mc.vram_is_ddr) { + if (rdev->mc.vram_width == 32) { +- k1.full = rfixed_const(40); ++ k1.full = dfixed_const(40); + c = 3; + } else { +- k1.full = rfixed_const(20); ++ k1.full = dfixed_const(20); + c = 1; + } + } else { +- k1.full = 
rfixed_const(40); ++ k1.full = dfixed_const(40); + c = 3; + } + +- temp_ff.full = rfixed_const(2); +- mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); +- temp_ff.full = rfixed_const(c); +- mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); +- temp_ff.full = rfixed_const(4); +- mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); +- mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); ++ temp_ff.full = dfixed_const(2); ++ mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); ++ temp_ff.full = dfixed_const(c); ++ mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); ++ temp_ff.full = dfixed_const(4); ++ mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); ++ mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); + mc_latency_mclk.full += k1.full; + +- mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); +- mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); ++ mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); ++ mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); + + /* + HW cursor time assuming worst case of full size colour cursor. + */ +- temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); ++ temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); + temp_ff.full += trcd_ff.full; + if (temp_ff.full < tras_ff.full) + temp_ff.full = tras_ff.full; +- cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); ++ cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); + +- temp_ff.full = rfixed_const(cur_size); +- cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); ++ temp_ff.full = dfixed_const(cur_size); ++ cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); + /* + Find the total latency for the display data. 
+ */ +- disp_latency_overhead.full = rfixed_const(8); +- disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); ++ disp_latency_overhead.full = dfixed_const(8); ++ disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); + mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; + mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; + +@@ -2646,16 +2964,16 @@ void r100_bandwidth_update(struct radeon_device *rdev) + /* + Find the drain rate of the display buffer. + */ +- temp_ff.full = rfixed_const((16/pixel_bytes1)); +- disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); ++ temp_ff.full = dfixed_const((16/pixel_bytes1)); ++ disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); + + /* + Find the critical point of the display buffer. + */ +- crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); +- crit_point_ff.full += rfixed_const_half(0); ++ crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); ++ crit_point_ff.full += dfixed_const_half(0); + +- critical_point = rfixed_trunc(crit_point_ff); ++ critical_point = dfixed_trunc(crit_point_ff); + + if (rdev->disp_priority == 2) { + critical_point = 0; +@@ -2726,8 +3044,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) + /* + Find the drain rate of the display buffer. 
+ */ +- temp_ff.full = rfixed_const((16/pixel_bytes2)); +- disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); ++ temp_ff.full = dfixed_const((16/pixel_bytes2)); ++ disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); + + grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); + grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); +@@ -2748,8 +3066,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) + critical_point2 = 0; + else { + temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; +- temp_ff.full = rfixed_const(temp); +- temp_ff.full = rfixed_mul(mclk_ff, temp_ff); ++ temp_ff.full = dfixed_const(temp); ++ temp_ff.full = dfixed_mul(mclk_ff, temp_ff); + if (sclk_ff.full < temp_ff.full) + temp_ff.full = sclk_ff.full; + +@@ -2757,15 +3075,15 @@ void r100_bandwidth_update(struct radeon_device *rdev) + + if (mode1) { + temp_ff.full = read_return_rate.full - disp_drain_rate.full; +- time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); ++ time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); + } else { + time_disp1_drop_priority.full = 0; + } + crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; +- crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); +- crit_point_ff.full += rfixed_const_half(0); ++ crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); ++ crit_point_ff.full += dfixed_const_half(0); + +- critical_point2 = rfixed_trunc(crit_point_ff); ++ critical_point2 = dfixed_trunc(crit_point_ff); + + if (rdev->disp_priority == 2) { + critical_point2 = 0; +@@ -3399,7 +3717,7 @@ static int r100_startup(struct radeon_device *rdev) + /* Resume clock */ + r100_clock_startup(rdev); + /* Initialize GPU configuration (# pipes, ...) 
*/ +- r100_gpu_init(rdev); ++// r100_gpu_init(rdev); + /* Initialize GART (initialize after TTM so we can allocate + * memory through TTM but finalize after TTM) */ + r100_enable_bm(rdev); +@@ -3436,7 +3754,7 @@ int r100_resume(struct radeon_device *rdev) + /* Resume clock before doing reset */ + r100_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); +@@ -3462,7 +3780,6 @@ int r100_suspend(struct radeon_device *rdev) + + void r100_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); +@@ -3505,7 +3822,7 @@ int r100_init(struct radeon_device *rdev) + return r; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), +@@ -3518,8 +3835,6 @@ int r100_init(struct radeon_device *rdev) + r100_errata(rdev); + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); +diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h +index df29a63..d016b16 100644 +--- a/drivers/gpu/drm/radeon/r100d.h ++++ b/drivers/gpu/drm/radeon/r100d.h +@@ -74,6 +74,134 @@ + #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) + + /* Registers */ ++#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 ++#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) ++#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) ++#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE ++#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) ++#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) ++#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD ++#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2) ++#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1) ++#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB ++#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) ++#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) ++#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 ++#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) ++#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) ++#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF ++#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) ++#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) ++#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF ++#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) ++#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) ++#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF ++#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) ++#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) ++#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F ++#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) ++#define 
G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) ++#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF ++#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) ++#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) ++#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF ++#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) ++#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) ++#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF ++#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) ++#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) ++#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF ++#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) ++#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) ++#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF ++#define R_000030_BUS_CNTL 0x000030 ++#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0) ++#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1) ++#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE ++#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1) ++#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1) ++#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD ++#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2) ++#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1) ++#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB ++#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3) ++#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1) ++#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7 ++#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4) ++#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1) ++#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF ++#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5) ++#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1) ++#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF ++#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6) ++#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1) ++#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF ++#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7) ++#define G_000030_BIOS_ROM_WRT_EN(x) 
(((x) >> 7) & 0x1) ++#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F ++#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8) ++#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1) ++#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF ++#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9) ++#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1) ++#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF ++#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10) ++#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1) ++#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF ++#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11) ++#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1) ++#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF ++#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12) ++#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1) ++#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF ++#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13) ++#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1) ++#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF ++#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14) ++#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1) ++#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF ++#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15) ++#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1) ++#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF ++#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16) ++#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF) ++#define C_000030_BUS_RETRY_WS 0xFFF0FFFF ++#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20) ++#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1) ++#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF ++#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21) ++#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1) ++#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF ++#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 
22) ++#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1) ++#define C_000030_BUS_SUSPEND 0xFFBFFFFF ++#define S_000030_LAT_16X(x) (((x) & 0x1) << 23) ++#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1) ++#define C_000030_LAT_16X 0xFF7FFFFF ++#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24) ++#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1) ++#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF ++#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25) ++#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1) ++#define C_000030_ENFRCWRDY 0xFDFFFFFF ++#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26) ++#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1) ++#define C_000030_BUS_MSTR_WS 0xFBFFFFFF ++#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27) ++#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1) ++#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF ++#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28) ++#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1) ++#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF ++#define S_000030_SERR_EN(x) (((x) & 0x1) << 29) ++#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1) ++#define C_000030_SERR_EN 0xDFFFFFFF ++#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30) ++#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1) ++#define C_000030_BUS_READ_BURST 0xBFFFFFFF ++#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31) ++#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1) ++#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF + #define R_000040_GEN_INT_CNTL 0x000040 + #define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0) + #define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1) +@@ -710,5 +838,41 @@ + #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) + #define C_00000D_FORCE_RB 0xEFFFFFFF + ++/* PLL regs */ ++#define SCLK_CNTL 0xd ++#define FORCE_HDP (1 << 17) ++#define CLK_PWRMGT_CNTL 0x14 ++#define GLOBAL_PMAN_EN (1 << 10) ++#define DISP_PM (1 << 20) ++#define PLL_PWRMGT_CNTL 0x15 ++#define MPLL_TURNOFF (1 << 0) 
++#define SPLL_TURNOFF (1 << 1) ++#define PPLL_TURNOFF (1 << 2) ++#define P2PLL_TURNOFF (1 << 3) ++#define TVPLL_TURNOFF (1 << 4) ++#define MOBILE_SU (1 << 16) ++#define SU_SCLK_USE_BCLK (1 << 17) ++#define SCLK_CNTL2 0x1e ++#define REDUCED_SPEED_SCLK_MODE (1 << 16) ++#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17) ++#define MCLK_MISC 0x1f ++#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18) ++#define SCLK_MORE_CNTL 0x35 ++#define REDUCED_SPEED_SCLK_EN (1 << 16) ++#define IO_CG_VOLTAGE_DROP (1 << 17) ++#define VOLTAGE_DELAY_SEL(x) ((x) << 20) ++#define VOLTAGE_DROP_SYNC (1 << 19) ++ ++/* mmreg */ ++#define DISP_PWR_MAN 0xd08 ++#define DISP_D3_GRPH_RST (1 << 18) ++#define DISP_D3_SUBPIC_RST (1 << 19) ++#define DISP_D3_OV0_RST (1 << 20) ++#define DISP_D1D2_GRPH_RST (1 << 21) ++#define DISP_D1D2_SUBPIC_RST (1 << 22) ++#define DISP_D1D2_OV0_RST (1 << 23) ++#define DISP_DVO_ENABLE_RST (1 << 24) ++#define TV_ENABLE_RST (1 << 25) ++#define AUTO_PWRUP_EN (1 << 26) + + #endif +diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c +index a5ff807..b2f9efe 100644 +--- a/drivers/gpu/drm/radeon/r300.c ++++ b/drivers/gpu/drm/radeon/r300.c +@@ -27,8 +27,9 @@ + */ + #include + #include +-#include "drmP.h" +-#include "drm.h" ++#include ++#include ++#include + #include "radeon_reg.h" + #include "radeon.h" + #include "radeon_asic.h" +@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) + u32 tmp; + int r; + ++ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); ++ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); ++ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); ++ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); + tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); + tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; + WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); +@@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev) + { + uint32_t gb_tile_config, tmp; + +- r100_hdp_reset(rdev); + if ((rdev->family == CHIP_R300 && 
rdev->pdev->device != 0x4144) || + (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { + /* r300,r350 */ +@@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev) + rdev->num_gb_pipes, rdev->num_z_pipes); + } + +-int r300_ga_reset(struct radeon_device *rdev) ++bool r300_gpu_is_lockup(struct radeon_device *rdev) + { +- uint32_t tmp; +- bool reinit_cp; +- int i; ++ u32 rbbm_status; ++ int r; + +- reinit_cp = rdev->cp.ready; +- rdev->cp.ready = false; +- for (i = 0; i < rdev->usec_timeout; i++) { +- WREG32(RADEON_CP_CSQ_MODE, 0); +- WREG32(RADEON_CP_CSQ_CNTL, 0); +- WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); +- (void)RREG32(RADEON_RBBM_SOFT_RESET); +- udelay(200); +- WREG32(RADEON_RBBM_SOFT_RESET, 0); +- /* Wait to prevent race in RBBM_STATUS */ +- mdelay(1); +- tmp = RREG32(RADEON_RBBM_STATUS); +- if (tmp & ((1 << 20) | (1 << 26))) { +- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp); +- /* GA still busy soft reset it */ +- WREG32(0x429C, 0x200); +- WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); +- WREG32(R300_RE_SCISSORS_TL, 0); +- WREG32(R300_RE_SCISSORS_BR, 0); +- WREG32(0x24AC, 0); +- } +- /* Wait to prevent race in RBBM_STATUS */ +- mdelay(1); +- tmp = RREG32(RADEON_RBBM_STATUS); +- if (!(tmp & ((1 << 20) | (1 << 26)))) { +- break; +- } ++ rbbm_status = RREG32(R_000E40_RBBM_STATUS); ++ if (!G_000E40_GUI_ACTIVE(rbbm_status)) { ++ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); ++ return false; + } +- for (i = 0; i < rdev->usec_timeout; i++) { +- tmp = RREG32(RADEON_RBBM_STATUS); +- if (!(tmp & ((1 << 20) | (1 << 26)))) { +- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", +- tmp); +- if (reinit_cp) { +- return r100_cp_init(rdev, rdev->cp.ring_size); +- } +- return 0; +- } +- DRM_UDELAY(1); ++ /* force CP activities */ ++ r = radeon_ring_lock(rdev, 2); ++ if (!r) { ++ /* PACKET2 NOP */ ++ radeon_ring_write(rdev, 0x80000000); ++ radeon_ring_write(rdev, 0x80000000); ++ radeon_ring_unlock_commit(rdev); + } +- tmp = 
RREG32(RADEON_RBBM_STATUS); +- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); +- return -1; ++ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); ++ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); + } + +-int r300_gpu_reset(struct radeon_device *rdev) ++int r300_asic_reset(struct radeon_device *rdev) + { +- uint32_t status; +- +- /* reset order likely matter */ +- status = RREG32(RADEON_RBBM_STATUS); +- /* reset HDP */ +- r100_hdp_reset(rdev); +- /* reset rb2d */ +- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { +- r100_rb2d_reset(rdev); +- } +- /* reset GA */ +- if (status & ((1 << 20) | (1 << 26))) { +- r300_ga_reset(rdev); +- } +- /* reset CP */ +- status = RREG32(RADEON_RBBM_STATUS); +- if (status & (1 << 16)) { +- r100_cp_reset(rdev); ++ struct r100_mc_save save; ++ u32 status, tmp; ++ ++ r100_mc_stop(rdev, &save); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ if (!G_000E40_GUI_ACTIVE(status)) { ++ return 0; + } ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* stop CP */ ++ WREG32(RADEON_CP_CSQ_CNTL, 0); ++ tmp = RREG32(RADEON_CP_RB_CNTL); ++ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); ++ WREG32(RADEON_CP_RB_RPTR_WR, 0); ++ WREG32(RADEON_CP_RB_WPTR, 0); ++ WREG32(RADEON_CP_RB_CNTL, tmp); ++ /* save PCI state */ ++ pci_save_state(rdev->pdev); ++ /* disable bus mastering */ ++ r100_bm_disable(rdev); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | ++ S_0000F0_SOFT_RESET_GA(1)); ++ RREG32(R_0000F0_RBBM_SOFT_RESET); ++ mdelay(500); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); ++ mdelay(1); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* resetting the CP seems to be problematic sometimes it end up ++ * hard locking the computer, but it's necessary for successfull ++ * reset more test & playing is needed on R3XX/R4XX to find a ++ * reliable 
(if any solution) ++ */ ++ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); ++ RREG32(R_0000F0_RBBM_SOFT_RESET); ++ mdelay(500); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); ++ mdelay(1); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* restore PCI & busmastering */ ++ pci_restore_state(rdev->pdev); ++ r100_enable_bm(rdev); + /* Check if GPU is idle */ +- status = RREG32(RADEON_RBBM_STATUS); +- if (status & RADEON_RBBM_ACTIVE) { +- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); ++ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { ++ dev_err(rdev->dev, "failed to reset GPU\n"); ++ rdev->gpu_lockup = true; + return -1; + } +- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); ++ r100_mc_resume(rdev, &save); ++ dev_info(rdev->dev, "GPU reset succeed\n"); + return 0; + } + +- + /* + * r300,r350,rv350,rv380 VRAM info + */ +@@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev) + /* Resume clock before doing reset */ + r300_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); +@@ -1344,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev) + + void r300_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); +@@ -1387,7 +1386,7 @@ int r300_init(struct radeon_device *rdev) + return r; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), +@@ -1400,8 +1399,6 @@ int r300_init(struct radeon_device *rdev) + r300_errata(rdev); + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); +diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h +index 4c73114..968a333 100644 +--- a/drivers/gpu/drm/radeon/r300d.h ++++ b/drivers/gpu/drm/radeon/r300d.h +@@ -209,7 +209,52 @@ + #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) + #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) + #define C_000E40_GUI_ACTIVE 0x7FFFFFFF +- ++#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 ++#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) ++#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) ++#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE ++#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) ++#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) ++#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD ++#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) ++#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) ++#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB ++#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) ++#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) ++#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 ++#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) ++#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) ++#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF ++#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) ++#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) ++#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF ++#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) ++#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) ++#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF ++#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) ++#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) ++#define C_0000F0_SOFT_RESET_HDP 
0xFFFFFF7F ++#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) ++#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) ++#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF ++#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) ++#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) ++#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF ++#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) ++#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) ++#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF ++#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) ++#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) ++#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF ++#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) ++#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) ++#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF ++#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) ++#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) ++#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF ++#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) ++#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) ++#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF + + #define R_00000D_SCLK_CNTL 0x00000D + #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) +diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c +index c2bda4a..e6c8914 100644 +--- a/drivers/gpu/drm/radeon/r420.c ++++ b/drivers/gpu/drm/radeon/r420.c +@@ -36,6 +36,45 @@ + #include "r420d.h" + #include "r420_reg_safe.h" + ++void r420_pm_init_profile(struct radeon_device *rdev) ++{ ++ /* default */ ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; ++ /* low sh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; ++ 
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ /* mid sh */ ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; ++ /* high sh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; ++ /* low mh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ /* mid mh */ ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; ++ /* high mh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; ++} ++ + static void r420_set_reg_safe(struct radeon_device *rdev) + { + rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; +@@ -241,7 +280,7 @@ int r420_resume(struct radeon_device *rdev) + /* Resume clock before doing reset */ + r420_clock_resume(rdev); + /* Reset gpu before posting otherwise 
ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); +@@ -274,7 +313,6 @@ int r420_suspend(struct radeon_device *rdev) + + void r420_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); +@@ -322,7 +360,7 @@ int r420_init(struct radeon_device *rdev) + } + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), +@@ -334,8 +372,6 @@ int r420_init(struct radeon_device *rdev) + + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); +diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h +index 0cf2ad2..93c9a2b 100644 +--- a/drivers/gpu/drm/radeon/r500_reg.h ++++ b/drivers/gpu/drm/radeon/r500_reg.h +@@ -347,9 +347,11 @@ + + #define AVIVO_D1CRTC_CONTROL 0x6080 + # define AVIVO_CRTC_EN (1 << 0) ++# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) + #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 + #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 + #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c ++#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0 + #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 + #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 + +@@ -488,6 +490,7 @@ + #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 + #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 + #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c ++#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0 + #define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 + #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 + +diff --git 
a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c +index 3c44b8d..34330df 100644 +--- a/drivers/gpu/drm/radeon/r520.c ++++ b/drivers/gpu/drm/radeon/r520.c +@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev) + { + unsigned pipe_select_current, gb_pipe_select, tmp; + +- r100_hdp_reset(rdev); + rv515_vga_render_disable(rdev); + /* + * DST_PIPE_CONFIG 0x170C +@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev) + /* Resume clock before doing reset */ + rv515_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); +@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev) + return -EINVAL; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), +@@ -262,8 +261,6 @@ int r520_init(struct radeon_device *rdev) + } + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); +diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c +index 8f3454e..0e91871 100644 +--- a/drivers/gpu/drm/radeon/r600.c ++++ b/drivers/gpu/drm/radeon/r600.c +@@ -44,6 +44,9 @@ + #define R700_PFP_UCODE_SIZE 848 + #define R700_PM4_UCODE_SIZE 1360 + #define R700_RLC_UCODE_SIZE 1024 ++#define EVERGREEN_PFP_UCODE_SIZE 1120 ++#define EVERGREEN_PM4_UCODE_SIZE 1376 ++#define EVERGREEN_RLC_UCODE_SIZE 768 + + /* Firmware Names */ + MODULE_FIRMWARE("radeon/R600_pfp.bin"); +@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin"); + MODULE_FIRMWARE("radeon/RV710_me.bin"); + MODULE_FIRMWARE("radeon/R600_rlc.bin"); + MODULE_FIRMWARE("radeon/R700_rlc.bin"); ++MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); ++MODULE_FIRMWARE("radeon/CEDAR_me.bin"); ++MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); ++MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); ++MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); ++MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); ++MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); ++MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); ++MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); ++MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); ++MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); ++MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); + + int r600_debugfs_mc_info_init(struct radeon_device *rdev); + +@@ -75,6 +90,494 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev); + int r600_mc_wait_for_idle(struct radeon_device *rdev); + void r600_gpu_init(struct radeon_device *rdev); + void r600_fini(struct radeon_device *rdev); ++void r600_irq_disable(struct radeon_device *rdev); ++ ++void r600_pm_get_dynpm_state(struct radeon_device *rdev) ++{ ++ int i; ++ ++ 
rdev->pm.dynpm_can_upclock = true; ++ rdev->pm.dynpm_can_downclock = true; ++ ++ /* power state array is low to high, default is first */ ++ if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) { ++ int min_power_state_index = 0; ++ ++ if (rdev->pm.num_power_states > 2) ++ min_power_state_index = 1; ++ ++ switch (rdev->pm.dynpm_planned_action) { ++ case DYNPM_ACTION_MINIMUM: ++ rdev->pm.requested_power_state_index = min_power_state_index; ++ rdev->pm.requested_clock_mode_index = 0; ++ rdev->pm.dynpm_can_downclock = false; ++ break; ++ case DYNPM_ACTION_DOWNCLOCK: ++ if (rdev->pm.current_power_state_index == min_power_state_index) { ++ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; ++ rdev->pm.dynpm_can_downclock = false; ++ } else { ++ if (rdev->pm.active_crtc_count > 1) { ++ for (i = 0; i < rdev->pm.num_power_states; i++) { ++ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) ++ continue; ++ else if (i >= rdev->pm.current_power_state_index) { ++ rdev->pm.requested_power_state_index = ++ rdev->pm.current_power_state_index; ++ break; ++ } else { ++ rdev->pm.requested_power_state_index = i; ++ break; ++ } ++ } ++ } else ++ rdev->pm.requested_power_state_index = ++ rdev->pm.current_power_state_index - 1; ++ } ++ rdev->pm.requested_clock_mode_index = 0; ++ /* don't use the power state if crtcs are active and no display flag is set */ ++ if ((rdev->pm.active_crtc_count > 0) && ++ (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
++ clock_info[rdev->pm.requested_clock_mode_index].flags & ++ RADEON_PM_MODE_NO_DISPLAY)) { ++ rdev->pm.requested_power_state_index++; ++ } ++ break; ++ case DYNPM_ACTION_UPCLOCK: ++ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { ++ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; ++ rdev->pm.dynpm_can_upclock = false; ++ } else { ++ if (rdev->pm.active_crtc_count > 1) { ++ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { ++ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) ++ continue; ++ else if (i <= rdev->pm.current_power_state_index) { ++ rdev->pm.requested_power_state_index = ++ rdev->pm.current_power_state_index; ++ break; ++ } else { ++ rdev->pm.requested_power_state_index = i; ++ break; ++ } ++ } ++ } else ++ rdev->pm.requested_power_state_index = ++ rdev->pm.current_power_state_index + 1; ++ } ++ rdev->pm.requested_clock_mode_index = 0; ++ break; ++ case DYNPM_ACTION_DEFAULT: ++ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; ++ rdev->pm.requested_clock_mode_index = 0; ++ rdev->pm.dynpm_can_upclock = false; ++ break; ++ case DYNPM_ACTION_NONE: ++ default: ++ DRM_ERROR("Requested mode for not defined action\n"); ++ return; ++ } ++ } else { ++ /* XXX select a power state based on AC/DC, single/dualhead, etc. 
*/ ++ /* for now just select the first power state and switch between clock modes */ ++ /* power state array is low to high, default is first (0) */ ++ if (rdev->pm.active_crtc_count > 1) { ++ rdev->pm.requested_power_state_index = -1; ++ /* start at 1 as we don't want the default mode */ ++ for (i = 1; i < rdev->pm.num_power_states; i++) { ++ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) ++ continue; ++ else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) || ++ (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) { ++ rdev->pm.requested_power_state_index = i; ++ break; ++ } ++ } ++ /* if nothing selected, grab the default state. */ ++ if (rdev->pm.requested_power_state_index == -1) ++ rdev->pm.requested_power_state_index = 0; ++ } else ++ rdev->pm.requested_power_state_index = 1; ++ ++ switch (rdev->pm.dynpm_planned_action) { ++ case DYNPM_ACTION_MINIMUM: ++ rdev->pm.requested_clock_mode_index = 0; ++ rdev->pm.dynpm_can_downclock = false; ++ break; ++ case DYNPM_ACTION_DOWNCLOCK: ++ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { ++ if (rdev->pm.current_clock_mode_index == 0) { ++ rdev->pm.requested_clock_mode_index = 0; ++ rdev->pm.dynpm_can_downclock = false; ++ } else ++ rdev->pm.requested_clock_mode_index = ++ rdev->pm.current_clock_mode_index - 1; ++ } else { ++ rdev->pm.requested_clock_mode_index = 0; ++ rdev->pm.dynpm_can_downclock = false; ++ } ++ /* don't use the power state if crtcs are active and no display flag is set */ ++ if ((rdev->pm.active_crtc_count > 0) && ++ (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
++ clock_info[rdev->pm.requested_clock_mode_index].flags & ++ RADEON_PM_MODE_NO_DISPLAY)) { ++ rdev->pm.requested_clock_mode_index++; ++ } ++ break; ++ case DYNPM_ACTION_UPCLOCK: ++ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { ++ if (rdev->pm.current_clock_mode_index == ++ (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) { ++ rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index; ++ rdev->pm.dynpm_can_upclock = false; ++ } else ++ rdev->pm.requested_clock_mode_index = ++ rdev->pm.current_clock_mode_index + 1; ++ } else { ++ rdev->pm.requested_clock_mode_index = ++ rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1; ++ rdev->pm.dynpm_can_upclock = false; ++ } ++ break; ++ case DYNPM_ACTION_DEFAULT: ++ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; ++ rdev->pm.requested_clock_mode_index = 0; ++ rdev->pm.dynpm_can_upclock = false; ++ break; ++ case DYNPM_ACTION_NONE: ++ default: ++ DRM_ERROR("Requested mode for not defined action\n"); ++ return; ++ } ++ } ++ ++ DRM_DEBUG("Requested: e: %d m: %d p: %d\n", ++ rdev->pm.power_state[rdev->pm.requested_power_state_index]. ++ clock_info[rdev->pm.requested_clock_mode_index].sclk, ++ rdev->pm.power_state[rdev->pm.requested_power_state_index]. ++ clock_info[rdev->pm.requested_clock_mode_index].mclk, ++ rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
++ pcie_lanes); ++} ++ ++static int r600_pm_get_type_index(struct radeon_device *rdev, ++ enum radeon_pm_state_type ps_type, ++ int instance) ++{ ++ int i; ++ int found_instance = -1; ++ ++ for (i = 0; i < rdev->pm.num_power_states; i++) { ++ if (rdev->pm.power_state[i].type == ps_type) { ++ found_instance++; ++ if (found_instance == instance) ++ return i; ++ } ++ } ++ /* return default if no match */ ++ return rdev->pm.default_power_state_index; ++} ++ ++void rs780_pm_init_profile(struct radeon_device *rdev) ++{ ++ if (rdev->pm.num_power_states == 2) { ++ /* default */ ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; ++ /* low sh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ /* mid sh */ ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; ++ /* high sh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; ++ /* low mh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ 
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ /* mid mh */ ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; ++ /* high mh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; ++ } else if (rdev->pm.num_power_states == 3) { ++ /* default */ ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; ++ /* low sh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ /* mid sh */ ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; ++ /* high sh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; ++ /* low mh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; ++ 
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ /* mid mh */ ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; ++ /* high mh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; ++ } else { ++ /* default */ ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; ++ /* low sh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ /* mid sh */ ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; ++ /* high sh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; ++ /* low mh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; ++ 
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ /* mid mh */ ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; ++ /* high mh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; ++ } ++} ++ ++void r600_pm_init_profile(struct radeon_device *rdev) ++{ ++ if (rdev->family == CHIP_R600) { ++ /* XXX */ ++ /* default */ ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; ++ /* low sh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ /* mid sh */ ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; ++ /* high sh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 
rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; ++ /* low mh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ /* mid mh */ ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; ++ /* high mh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; ++ } else { ++ if (rdev->pm.num_power_states < 4) { ++ /* default */ ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; ++ /* low sh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ /* mid sh */ ++ 
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; ++ /* high sh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; ++ /* low mh */ ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ /* low mh */ ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; ++ /* high mh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; ++ } else { ++ /* default */ ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; ++ /* low sh */ ++ if (rdev->flags & RADEON_IS_MOBILITY) { ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, 
POWER_STATE_TYPE_BATTERY, 0); ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ } else { ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; ++ } ++ /* mid sh */ ++ if (rdev->flags & RADEON_IS_MOBILITY) { ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; ++ } else { ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; ++ } ++ /* high sh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; ++ /* low mh */ ++ if (rdev->flags & RADEON_IS_MOBILITY) { ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); ++ 
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ } else { ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; ++ } ++ /* mid mh */ ++ if (rdev->flags & RADEON_IS_MOBILITY) { ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; ++ } else { ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; ++ } ++ /* high mh */ ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = ++ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; ++ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; ++ } ++ } ++} ++ ++void r600_pm_misc(struct radeon_device *rdev) ++{ ++ int req_ps_idx = 
rdev->pm.requested_power_state_index; ++ int req_cm_idx = rdev->pm.requested_clock_mode_index; ++ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; ++ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; ++ ++ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { ++ if (voltage->voltage != rdev->pm.current_vddc) { ++ radeon_atom_set_voltage(rdev, voltage->voltage); ++ rdev->pm.current_vddc = voltage->voltage; ++ DRM_DEBUG("Setting: v: %d\n", voltage->voltage); ++ } ++ } ++} ++ ++bool r600_gui_idle(struct radeon_device *rdev) ++{ ++ if (RREG32(GRBM_STATUS) & GUI_ACTIVE) ++ return false; ++ else ++ return true; ++} + + /* hpd for digital panel detect/disconnect */ + bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) +@@ -714,11 +1217,6 @@ int r600_mc_init(struct radeon_device *rdev) + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); + rdev->mc.visible_vram_size = rdev->mc.aper_size; +- /* FIXME remove this once we support unmappable VRAM */ +- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { +- rdev->mc.mc_vram_size = rdev->mc.aper_size; +- rdev->mc.real_vram_size = rdev->mc.aper_size; +- } + r600_vram_gtt_location(rdev, &rdev->mc); + + if (rdev->flags & RADEON_IS_IGP) +@@ -750,7 +1248,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) + S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) | + S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | + S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); +- u32 srbm_reset = 0; + u32 tmp; + + dev_info(rdev->dev, "GPU softreset \n"); +@@ -765,7 +1262,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + } + /* Disable CP parsing/prefetching */ +- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); ++ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); + /* Check if any of the rendering block is busy and reset it */ + if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) 
|| + (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { +@@ -784,72 +1281,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) + S_008020_SOFT_RESET_VGT(1); + dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(R_008020_GRBM_SOFT_RESET, tmp); +- (void)RREG32(R_008020_GRBM_SOFT_RESET); +- udelay(50); ++ RREG32(R_008020_GRBM_SOFT_RESET); ++ mdelay(15); + WREG32(R_008020_GRBM_SOFT_RESET, 0); +- (void)RREG32(R_008020_GRBM_SOFT_RESET); + } + /* Reset CP (we always reset CP) */ + tmp = S_008020_SOFT_RESET_CP(1); + dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(R_008020_GRBM_SOFT_RESET, tmp); +- (void)RREG32(R_008020_GRBM_SOFT_RESET); +- udelay(50); ++ RREG32(R_008020_GRBM_SOFT_RESET); ++ mdelay(15); + WREG32(R_008020_GRBM_SOFT_RESET, 0); +- (void)RREG32(R_008020_GRBM_SOFT_RESET); +- /* Reset others GPU block if necessary */ +- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_RLC(1); +- if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_GRBM(1); +- if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_IH(1); +- if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_VMC(1); +- if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_MC(1); +- if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_MC(1); +- if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_MC(1); +- if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_MC(1); +- if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_MC(1); +- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_RLC(1); +- if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= 
S_000E60_SOFT_RESET_SEM(1); +- if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS))) +- srbm_reset |= S_000E60_SOFT_RESET_BIF(1); +- dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset); +- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); +- (void)RREG32(R_000E60_SRBM_SOFT_RESET); +- udelay(50); +- WREG32(R_000E60_SRBM_SOFT_RESET, 0); +- (void)RREG32(R_000E60_SRBM_SOFT_RESET); +- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); +- (void)RREG32(R_000E60_SRBM_SOFT_RESET); +- udelay(50); +- WREG32(R_000E60_SRBM_SOFT_RESET, 0); +- (void)RREG32(R_000E60_SRBM_SOFT_RESET); + /* Wait a little for things to settle down */ +- udelay(50); ++ mdelay(1); + dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", + RREG32(R_008010_GRBM_STATUS)); + dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", + RREG32(R_008014_GRBM_STATUS2)); + dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", + RREG32(R_000E50_SRBM_STATUS)); +- /* After reset we need to reinit the asic as GPU often endup in an +- * incoherent state. 
+- */ +- atom_asic_init(rdev->mode_info.atom_context); + rv515_mc_resume(rdev, &save); + return 0; + } + +-int r600_gpu_reset(struct radeon_device *rdev) ++bool r600_gpu_is_lockup(struct radeon_device *rdev) ++{ ++ u32 srbm_status; ++ u32 grbm_status; ++ u32 grbm_status2; ++ int r; ++ ++ srbm_status = RREG32(R_000E50_SRBM_STATUS); ++ grbm_status = RREG32(R_008010_GRBM_STATUS); ++ grbm_status2 = RREG32(R_008014_GRBM_STATUS2); ++ if (!G_008010_GUI_ACTIVE(grbm_status)) { ++ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); ++ return false; ++ } ++ /* force CP activities */ ++ r = radeon_ring_lock(rdev, 2); ++ if (!r) { ++ /* PACKET2 NOP */ ++ radeon_ring_write(rdev, 0x80000000); ++ radeon_ring_write(rdev, 0x80000000); ++ radeon_ring_unlock_commit(rdev); ++ } ++ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); ++ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); ++} ++ ++int r600_asic_reset(struct radeon_device *rdev) + { + return r600_gpu_soft_reset(rdev); + } +@@ -1467,10 +1948,31 @@ int r600_init_microcode(struct radeon_device *rdev) + chip_name = "RV710"; + rlc_chip_name = "R700"; + break; ++ case CHIP_CEDAR: ++ chip_name = "CEDAR"; ++ rlc_chip_name = "CEDAR"; ++ break; ++ case CHIP_REDWOOD: ++ chip_name = "REDWOOD"; ++ rlc_chip_name = "REDWOOD"; ++ break; ++ case CHIP_JUNIPER: ++ chip_name = "JUNIPER"; ++ rlc_chip_name = "JUNIPER"; ++ break; ++ case CHIP_CYPRESS: ++ case CHIP_HEMLOCK: ++ chip_name = "CYPRESS"; ++ rlc_chip_name = "CYPRESS"; ++ break; + default: BUG(); + } + +- if (rdev->family >= CHIP_RV770) { ++ if (rdev->family >= CHIP_CEDAR) { ++ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; ++ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; ++ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; ++ } else if (rdev->family >= CHIP_RV770) { + pfp_req_size = R700_PFP_UCODE_SIZE * 4; + me_req_size = R700_PM4_UCODE_SIZE * 4; + rlc_req_size = R700_RLC_UCODE_SIZE * 4; +@@ -1584,12 +2086,15 @@ int r600_cp_start(struct radeon_device *rdev) + } + 
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); + radeon_ring_write(rdev, 0x1); +- if (rdev->family < CHIP_RV770) { +- radeon_ring_write(rdev, 0x3); +- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1); +- } else { ++ if (rdev->family >= CHIP_CEDAR) { ++ radeon_ring_write(rdev, 0x0); ++ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); ++ } else if (rdev->family >= CHIP_RV770) { + radeon_ring_write(rdev, 0x0); + radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); ++ } else { ++ radeon_ring_write(rdev, 0x3); ++ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1); + } + radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); + radeon_ring_write(rdev, 0); +@@ -2051,8 +2556,6 @@ int r600_init(struct radeon_device *rdev) + r = radeon_clocks_init(rdev); + if (r) + return r; +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) +@@ -2117,7 +2620,6 @@ int r600_init(struct radeon_device *rdev) + + void r600_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r600_audio_fini(rdev); + r600_blit_fini(rdev); + r600_cp_fini(rdev); +@@ -2290,10 +2792,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev) + } + } + +-static void r600_rlc_stop(struct radeon_device *rdev) ++void r600_rlc_stop(struct radeon_device *rdev) + { + +- if (rdev->family >= CHIP_RV770) { ++ if ((rdev->family >= CHIP_RV770) && ++ (rdev->family <= CHIP_RV740)) { + /* r7xx asics need to soft reset RLC before halting */ + WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); + RREG32(SRBM_SOFT_RESET); +@@ -2330,7 +2833,12 @@ static int r600_rlc_init(struct radeon_device *rdev) + WREG32(RLC_UCODE_CNTL, 0); + + fw_data = (const __be32 *)rdev->rlc_fw->data; +- if (rdev->family >= CHIP_RV770) { ++ if (rdev->family >= CHIP_CEDAR) { ++ for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { ++ WREG32(RLC_UCODE_ADDR, i); ++ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 
++ } ++ } else if (rdev->family >= CHIP_RV770) { + for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); +@@ -2360,7 +2868,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev) + rdev->ih.enabled = true; + } + +-static void r600_disable_interrupts(struct radeon_device *rdev) ++void r600_disable_interrupts(struct radeon_device *rdev) + { + u32 ih_rb_cntl = RREG32(IH_RB_CNTL); + u32 ih_cntl = RREG32(IH_CNTL); +@@ -2475,7 +2983,10 @@ int r600_irq_init(struct radeon_device *rdev) + WREG32(IH_CNTL, ih_cntl); + + /* force the active interrupt state to all disabled */ +- r600_disable_interrupt_state(rdev); ++ if (rdev->family >= CHIP_CEDAR) ++ evergreen_disable_interrupt_state(rdev); ++ else ++ r600_disable_interrupt_state(rdev); + + /* enable irqs */ + r600_enable_interrupts(rdev); +@@ -2485,7 +2996,7 @@ int r600_irq_init(struct radeon_device *rdev) + + void r600_irq_suspend(struct radeon_device *rdev) + { +- r600_disable_interrupts(rdev); ++ r600_irq_disable(rdev); + r600_rlc_stop(rdev); + } + +@@ -2500,6 +3011,8 @@ int r600_irq_set(struct radeon_device *rdev) + u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; + u32 mode_int = 0; + u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; ++ u32 grbm_int_cntl = 0; ++ u32 hdmi1, hdmi2; + + if (!rdev->irq.installed) { + WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); +@@ -2513,7 +3026,9 @@ int r600_irq_set(struct radeon_device *rdev) + return 0; + } + ++ hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; + if (ASIC_IS_DCE3(rdev)) { ++ hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; +@@ -2523,6 +3038,7 @@ int r600_irq_set(struct radeon_device *rdev) + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 
+ } + } else { ++ hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN; + hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; +@@ -2564,10 +3080,25 @@ int r600_irq_set(struct radeon_device *rdev) + DRM_DEBUG("r600_irq_set: hpd 6\n"); + hpd6 |= DC_HPDx_INT_EN; + } ++ if (rdev->irq.hdmi[0]) { ++ DRM_DEBUG("r600_irq_set: hdmi 1\n"); ++ hdmi1 |= R600_HDMI_INT_EN; ++ } ++ if (rdev->irq.hdmi[1]) { ++ DRM_DEBUG("r600_irq_set: hdmi 2\n"); ++ hdmi2 |= R600_HDMI_INT_EN; ++ } ++ if (rdev->irq.gui_idle) { ++ DRM_DEBUG("gui idle\n"); ++ grbm_int_cntl |= GUI_IDLE_INT_ENABLE; ++ } + + WREG32(CP_INT_CNTL, cp_int_cntl); + WREG32(DxMODE_INT_MASK, mode_int); ++ WREG32(GRBM_INT_CNTL, grbm_int_cntl); ++ WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); + if (ASIC_IS_DCE3(rdev)) { ++ WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2); + WREG32(DC_HPD1_INT_CONTROL, hpd1); + WREG32(DC_HPD2_INT_CONTROL, hpd2); + WREG32(DC_HPD3_INT_CONTROL, hpd3); +@@ -2577,6 +3108,7 @@ int r600_irq_set(struct radeon_device *rdev) + WREG32(DC_HPD6_INT_CONTROL, hpd6); + } + } else { ++ WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2); + WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); + WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); + WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); +@@ -2660,6 +3192,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev, + WREG32(DC_HPD6_INT_CONTROL, tmp); + } + } ++ if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { ++ WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); ++ } ++ if (ASIC_IS_DCE3(rdev)) { ++ if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { ++ WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); ++ } ++ } else { ++ if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & 
R600_HDMI_INT_PENDING) { ++ WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); ++ } ++ } + } + + void r600_irq_disable(struct radeon_device *rdev) +@@ -2713,6 +3257,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) + * 19 1 FP Hot plug detection B + * 19 2 DAC A auto-detection + * 19 3 DAC B auto-detection ++ * 21 4 HDMI block A ++ * 21 5 HDMI block B + * 176 - CP_INT RB + * 177 - CP_INT IB1 + * 178 - CP_INT IB2 +@@ -2852,6 +3398,10 @@ restart_ih: + break; + } + break; ++ case 21: /* HDMI */ ++ DRM_DEBUG("IH: HDMI: 0x%x\n", src_data); ++ r600_audio_schedule_polling(rdev); ++ break; + case 176: /* CP_INT in ring buffer */ + case 177: /* CP_INT in IB1 */ + case 178: /* CP_INT in IB2 */ +@@ -2861,6 +3411,11 @@ restart_ih: + case 181: /* CP EOP event */ + DRM_DEBUG("IH: CP EOP\n"); + break; ++ case 233: /* GUI IDLE */ ++ DRM_DEBUG("IH: GUI idle\n"); ++ rdev->pm.gui_idle = true; ++ wake_up(&rdev->irq.idle_queue); ++ break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; +diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c +index 1d89805..2b26553 100644 +--- a/drivers/gpu/drm/radeon/r600_audio.c ++++ b/drivers/gpu/drm/radeon/r600_audio.c +@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev) + /* + * current number of channels + */ +-static int r600_audio_channels(struct radeon_device *rdev) ++int r600_audio_channels(struct radeon_device *rdev) + { + return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1; + } +@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev) + /* + * current bits per sample + */ +-static int r600_audio_bits_per_sample(struct radeon_device *rdev) ++int r600_audio_bits_per_sample(struct radeon_device *rdev) + { + uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4; + switch (value) { +@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device 
*rdev) + /* + * current sampling rate in HZ + */ +-static int r600_audio_rate(struct radeon_device *rdev) ++int r600_audio_rate(struct radeon_device *rdev) + { + uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); + uint32_t result; +@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev) + /* + * iec 60958 status bits + */ +-static uint8_t r600_audio_status_bits(struct radeon_device *rdev) ++uint8_t r600_audio_status_bits(struct radeon_device *rdev) + { + return RREG32(R600_AUDIO_STATUS_BITS) & 0xff; + } +@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev) + /* + * iec 60958 category code + */ +-static uint8_t r600_audio_category_code(struct radeon_device *rdev) ++uint8_t r600_audio_category_code(struct radeon_device *rdev) + { + return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff; + } + + /* ++ * schedule next audio update event ++ */ ++void r600_audio_schedule_polling(struct radeon_device *rdev) ++{ ++ mod_timer(&rdev->audio_timer, ++ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL)); ++} ++ ++/* + * update all hdmi interfaces with current audio parameters + */ + static void r600_audio_update_hdmi(unsigned long param) +@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param) + uint8_t category_code = r600_audio_category_code(rdev); + + struct drm_encoder *encoder; +- int changes = 0; ++ int changes = 0, still_going = 0; + + changes |= channels != rdev->audio_channels; + changes |= rate != rdev->audio_rate; +@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param) + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ still_going |= radeon_encoder->audio_polling_active; + if (changes || r600_hdmi_buffer_status_changed(encoder)) +- r600_hdmi_update_audio_settings( +- encoder, channels, +- rate, bps, status_bits, +- category_code); ++ r600_hdmi_update_audio_settings(encoder); + 
} + +- mod_timer(&rdev->audio_timer, +- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL)); ++ if(still_going) r600_audio_schedule_polling(rdev); + } + + /* +@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev) + r600_audio_update_hdmi, + (unsigned long)rdev); + ++ return 0; ++} ++ ++/* ++ * enable the polling timer, to check for status changes ++ */ ++void r600_audio_enable_polling(struct drm_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct radeon_device *rdev = dev->dev_private; ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ ++ DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active); ++ if (radeon_encoder->audio_polling_active) ++ return; ++ ++ radeon_encoder->audio_polling_active = 1; + mod_timer(&rdev->audio_timer, jiffies + 1); ++} + +- return 0; ++/* ++ * disable the polling timer, so we get no more status updates ++ */ ++void r600_audio_disable_polling(struct drm_encoder *encoder) ++{ ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active); ++ radeon_encoder->audio_polling_active = 0; + } + + /* +diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c +index f6c6c77..d13622a 100644 +--- a/drivers/gpu/drm/radeon/r600_blit_kms.c ++++ b/drivers/gpu/drm/radeon/r600_blit_kms.c +@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev) + u32 packet2s[16]; + int num_packet2s = 0; + ++ /* don't reinitialize blit */ ++ if (rdev->r600_blit.shader_obj) ++ return 0; + mutex_init(&rdev->r600_blit.mutex); + rdev->r600_blit.state_offset = 0; + +diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c +index 2616b82..26b4bc9 100644 +--- a/drivers/gpu/drm/radeon/r600_hdmi.c ++++ b/drivers/gpu/drm/radeon/r600_hdmi.c +@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder) + if (!offset) 
+ return; + +- if (r600_hdmi_is_audio_buffer_filled(encoder)) { +- /* disable audio workaround and start delivering of audio frames */ +- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001); ++ if (!radeon_encoder->hdmi_audio_workaround || ++ r600_hdmi_is_audio_buffer_filled(encoder)) { + +- } else if (radeon_encoder->hdmi_audio_workaround) { +- /* enable audio workaround and start delivering of audio frames */ +- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001); ++ /* disable audio workaround */ ++ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001); + + } else { +- /* disable audio workaround and stop delivering of audio frames */ +- WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001); ++ /* enable audio workaround */ ++ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001); + } + } + +@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod + + /* audio packets per line, does anyone know how to calc this ? */ + WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000); +- +- /* update? reset? 
don't realy know */ +- WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000); + } + + /* + * update settings with current parameters from audio engine + */ +-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, +- int channels, +- int rate, +- int bps, +- uint8_t status_bits, +- uint8_t category_code) ++void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) + { + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; + ++ int channels = r600_audio_channels(rdev); ++ int rate = r600_audio_rate(rdev); ++ int bps = r600_audio_bits_per_sample(rdev); ++ uint8_t status_bits = r600_audio_status_bits(rdev); ++ uint8_t category_code = r600_audio_category_code(rdev); ++ + uint32_t iec; + + if (!offset) +@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, + r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0); + + r600_hdmi_audio_workaround(encoder); +- +- /* update? reset? 
don't realy know */ +- WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); + } + + static int r600_hdmi_find_free_block(struct drm_device *dev) +@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder) + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ uint32_t offset; + + if (ASIC_IS_DCE4(rdev)) + return; +@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder) + } + } + ++ offset = radeon_encoder->hdmi_offset; + if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { + WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1); + } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { +- int offset = radeon_encoder->hdmi_offset; + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4); +@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder) + } + } + ++ if (rdev->irq.installed ++ && rdev->family != CHIP_RS600 ++ && rdev->family != CHIP_RS690 ++ && rdev->family != CHIP_RS740) { ++ ++ /* if irq is available use it */ ++ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 
0 : 1] = true; ++ radeon_irq_set(rdev); ++ ++ r600_audio_disable_polling(encoder); ++ } else { ++ /* if not fallback to polling */ ++ r600_audio_enable_polling(encoder); ++ } ++ + DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", + radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); + } +@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder) + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ uint32_t offset; + + if (ASIC_IS_DCE4(rdev)) + return; + +- if (!radeon_encoder->hdmi_offset) { ++ offset = radeon_encoder->hdmi_offset; ++ if (!offset) { + dev_err(rdev->dev, "Disabling not enabled HDMI\n"); + return; + } + + DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n", +- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); ++ offset, radeon_encoder->encoder_id); ++ ++ /* disable irq */ ++ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 
0 : 1] = false; ++ radeon_irq_set(rdev); ++ ++ /* disable polling */ ++ r600_audio_disable_polling(encoder); + + if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { + WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1); + } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { +- int offset = radeon_encoder->hdmi_offset; + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4); +diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h +index 7b1d223..d84612a 100644 +--- a/drivers/gpu/drm/radeon/r600_reg.h ++++ b/drivers/gpu/drm/radeon/r600_reg.h +@@ -157,33 +157,36 @@ + #define R600_HDMI_BLOCK3 0x7800 + + /* HDMI registers */ +-#define R600_HDMI_ENABLE 0x00 +-#define R600_HDMI_STATUS 0x04 +-#define R600_HDMI_CNTL 0x08 +-#define R600_HDMI_UNKNOWN_0 0x0C +-#define R600_HDMI_AUDIOCNTL 0x10 +-#define R600_HDMI_VIDEOCNTL 0x14 +-#define R600_HDMI_VERSION 0x18 +-#define R600_HDMI_UNKNOWN_1 0x28 +-#define R600_HDMI_VIDEOINFOFRAME_0 0x54 +-#define R600_HDMI_VIDEOINFOFRAME_1 0x58 +-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c +-#define R600_HDMI_VIDEOINFOFRAME_3 0x60 +-#define R600_HDMI_32kHz_CTS 0xac +-#define R600_HDMI_32kHz_N 0xb0 +-#define R600_HDMI_44_1kHz_CTS 0xb4 +-#define R600_HDMI_44_1kHz_N 0xb8 +-#define R600_HDMI_48kHz_CTS 0xbc +-#define R600_HDMI_48kHz_N 0xc0 +-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc +-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0 +-#define R600_HDMI_IEC60958_1 0xd4 +-#define R600_HDMI_IEC60958_2 0xd8 +-#define R600_HDMI_UNKNOWN_2 0xdc +-#define R600_HDMI_AUDIO_DEBUG_0 0xe0 +-#define R600_HDMI_AUDIO_DEBUG_1 0xe4 +-#define R600_HDMI_AUDIO_DEBUG_2 0xe8 +-#define R600_HDMI_AUDIO_DEBUG_3 0xec ++#define R600_HDMI_ENABLE 0x00 ++#define R600_HDMI_STATUS 0x04 ++# define R600_HDMI_INT_PENDING (1 << 29) ++#define R600_HDMI_CNTL 0x08 ++# define R600_HDMI_INT_EN (1 << 28) ++# define R600_HDMI_INT_ACK (1 << 29) ++#define R600_HDMI_UNKNOWN_0 0x0C ++#define 
R600_HDMI_AUDIOCNTL 0x10 ++#define R600_HDMI_VIDEOCNTL 0x14 ++#define R600_HDMI_VERSION 0x18 ++#define R600_HDMI_UNKNOWN_1 0x28 ++#define R600_HDMI_VIDEOINFOFRAME_0 0x54 ++#define R600_HDMI_VIDEOINFOFRAME_1 0x58 ++#define R600_HDMI_VIDEOINFOFRAME_2 0x5c ++#define R600_HDMI_VIDEOINFOFRAME_3 0x60 ++#define R600_HDMI_32kHz_CTS 0xac ++#define R600_HDMI_32kHz_N 0xb0 ++#define R600_HDMI_44_1kHz_CTS 0xb4 ++#define R600_HDMI_44_1kHz_N 0xb8 ++#define R600_HDMI_48kHz_CTS 0xbc ++#define R600_HDMI_48kHz_N 0xc0 ++#define R600_HDMI_AUDIOINFOFRAME_0 0xcc ++#define R600_HDMI_AUDIOINFOFRAME_1 0xd0 ++#define R600_HDMI_IEC60958_1 0xd4 ++#define R600_HDMI_IEC60958_2 0xd8 ++#define R600_HDMI_UNKNOWN_2 0xdc ++#define R600_HDMI_AUDIO_DEBUG_0 0xe0 ++#define R600_HDMI_AUDIO_DEBUG_1 0xe4 ++#define R600_HDMI_AUDIO_DEBUG_2 0xe8 ++#define R600_HDMI_AUDIO_DEBUG_3 0xec + + /* HDMI additional config base register addresses */ + #define R600_HDMI_CONFIG1 0x7600 +diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h +index 034218c..9c8af5f 100644 +--- a/drivers/gpu/drm/radeon/radeon.h ++++ b/drivers/gpu/drm/radeon/radeon.h +@@ -89,16 +89,17 @@ extern int radeon_testing; + extern int radeon_connector_table; + extern int radeon_tv; + extern int radeon_new_pll; +-extern int radeon_dynpm; + extern int radeon_audio; + extern int radeon_disp_priority; + extern int radeon_hw_i2c; ++extern int radeon_pm; + + /* + * Copy from radeon_drv.h so we don't have to include both and have conflicting + * symbol; + */ + #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ ++#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) + /* RADEON_IB_POOL_SIZE must be a power of 2 */ + #define RADEON_IB_POOL_SIZE 16 + #define RADEON_DEBUGFS_MAX_NUM_FILES 32 +@@ -172,8 +173,11 @@ struct radeon_clock { + int radeon_pm_init(struct radeon_device *rdev); + void radeon_pm_fini(struct radeon_device *rdev); + void radeon_pm_compute_clocks(struct radeon_device *rdev); ++void radeon_pm_suspend(struct radeon_device 
*rdev); ++void radeon_pm_resume(struct radeon_device *rdev); + void radeon_combios_get_power_modes(struct radeon_device *rdev); + void radeon_atombios_get_power_modes(struct radeon_device *rdev); ++void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); + + /* + * Fences. +@@ -182,7 +186,8 @@ struct radeon_fence_driver { + uint32_t scratch_reg; + atomic_t seq; + uint32_t last_seq; +- unsigned long count_timeout; ++ unsigned long last_jiffies; ++ unsigned long last_timeout; + wait_queue_head_t queue; + rwlock_t lock; + struct list_head created; +@@ -197,7 +202,6 @@ struct radeon_fence { + struct list_head list; + /* protected by radeon_fence.lock */ + uint32_t seq; +- unsigned long timeout; + bool emited; + bool signaled; + }; +@@ -259,6 +263,7 @@ struct radeon_bo_list { + unsigned rdomain; + unsigned wdomain; + u32 tiling_flags; ++ bool reserved; + }; + + /* +@@ -371,10 +376,15 @@ struct radeon_irq { + bool installed; + bool sw_int; + /* FIXME: use a define max crtc rather than hardcode it */ +- bool crtc_vblank_int[2]; ++ bool crtc_vblank_int[6]; + wait_queue_head_t vblank_queue; + /* FIXME: use defines for max hpd/dacs */ + bool hpd[6]; ++ bool gui_idle; ++ bool gui_idle_acked; ++ wait_queue_head_t idle_queue; ++ /* FIXME: use defines for max HDMI blocks */ ++ bool hdmi[2]; + spinlock_t sw_lock; + int sw_refcount; + }; +@@ -462,7 +472,9 @@ int radeon_ib_test(struct radeon_device *rdev); + extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); + /* Ring access between begin & end cannot sleep */ + void radeon_ring_free_size(struct radeon_device *rdev); ++int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw); + int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); ++void radeon_ring_commit(struct radeon_device *rdev); + void radeon_ring_unlock_commit(struct radeon_device *rdev); + void radeon_ring_unlock_undo(struct radeon_device *rdev); + int radeon_ring_test(struct radeon_device *rdev); +@@ -566,6 
+578,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, + */ + int radeon_agp_init(struct radeon_device *rdev); + void radeon_agp_resume(struct radeon_device *rdev); ++void radeon_agp_suspend(struct radeon_device *rdev); + void radeon_agp_fini(struct radeon_device *rdev); + + +@@ -597,17 +610,24 @@ struct radeon_wb { + * Equation between gpu/memory clock and available bandwidth is hw dependent + * (type of memory, bus size, efficiency, ...) + */ +-enum radeon_pm_state { +- PM_STATE_DISABLED, +- PM_STATE_MINIMUM, +- PM_STATE_PAUSED, +- PM_STATE_ACTIVE ++ ++enum radeon_pm_method { ++ PM_METHOD_PROFILE, ++ PM_METHOD_DYNPM, ++}; ++ ++enum radeon_dynpm_state { ++ DYNPM_STATE_DISABLED, ++ DYNPM_STATE_MINIMUM, ++ DYNPM_STATE_PAUSED, ++ DYNPM_STATE_ACTIVE + }; +-enum radeon_pm_action { +- PM_ACTION_NONE, +- PM_ACTION_MINIMUM, +- PM_ACTION_DOWNCLOCK, +- PM_ACTION_UPCLOCK ++enum radeon_dynpm_action { ++ DYNPM_ACTION_NONE, ++ DYNPM_ACTION_MINIMUM, ++ DYNPM_ACTION_DOWNCLOCK, ++ DYNPM_ACTION_UPCLOCK, ++ DYNPM_ACTION_DEFAULT + }; + + enum radeon_voltage_type { +@@ -625,11 +645,28 @@ enum radeon_pm_state_type { + POWER_STATE_TYPE_PERFORMANCE, + }; + +-enum radeon_pm_clock_mode_type { +- POWER_MODE_TYPE_DEFAULT, +- POWER_MODE_TYPE_LOW, +- POWER_MODE_TYPE_MID, +- POWER_MODE_TYPE_HIGH, ++enum radeon_pm_profile_type { ++ PM_PROFILE_DEFAULT, ++ PM_PROFILE_AUTO, ++ PM_PROFILE_LOW, ++ PM_PROFILE_MID, ++ PM_PROFILE_HIGH, ++}; ++ ++#define PM_PROFILE_DEFAULT_IDX 0 ++#define PM_PROFILE_LOW_SH_IDX 1 ++#define PM_PROFILE_MID_SH_IDX 2 ++#define PM_PROFILE_HIGH_SH_IDX 3 ++#define PM_PROFILE_LOW_MH_IDX 4 ++#define PM_PROFILE_MID_MH_IDX 5 ++#define PM_PROFILE_HIGH_MH_IDX 6 ++#define PM_PROFILE_MAX 7 ++ ++struct radeon_pm_profile { ++ int dpms_off_ps_idx; ++ int dpms_on_ps_idx; ++ int dpms_off_cm_idx; ++ int dpms_on_cm_idx; + }; + + struct radeon_voltage { +@@ -646,12 +683,8 @@ struct radeon_voltage { + u32 voltage; + }; + +-struct radeon_pm_non_clock_info { +- /* pcie lanes 
*/ +- int pcie_lanes; +- /* standardized non-clock flags */ +- u32 flags; +-}; ++/* clock mode flags */ ++#define RADEON_PM_MODE_NO_DISPLAY (1 << 0) + + struct radeon_pm_clock_info { + /* memory clock */ +@@ -660,10 +693,13 @@ struct radeon_pm_clock_info { + u32 sclk; + /* voltage info */ + struct radeon_voltage voltage; +- /* standardized clock flags - not sure we'll need these */ ++ /* standardized clock flags */ + u32 flags; + }; + ++/* state flags */ ++#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0) ++ + struct radeon_power_state { + enum radeon_pm_state_type type; + /* XXX: use a define for num clock modes */ +@@ -671,9 +707,11 @@ struct radeon_power_state { + /* number of valid clock modes in this power state */ + int num_clock_modes; + struct radeon_pm_clock_info *default_clock_mode; +- /* non clock info about this state */ +- struct radeon_pm_non_clock_info non_clock_info; +- bool voltage_drop_active; ++ /* standardized state flags */ ++ u32 flags; ++ u32 misc; /* vbios specific flags */ ++ u32 misc2; /* vbios specific flags */ ++ int pcie_lanes; /* pcie lanes */ + }; + + /* +@@ -683,14 +721,11 @@ struct radeon_power_state { + + struct radeon_pm { + struct mutex mutex; +- struct delayed_work idle_work; +- enum radeon_pm_state state; +- enum radeon_pm_action planned_action; +- unsigned long action_timeout; +- bool downclocked; +- int active_crtcs; ++ u32 active_crtcs; ++ int active_crtc_count; + int req_vblank; + bool vblank_sync; ++ bool gui_idle; + fixed20_12 max_bandwidth; + fixed20_12 igp_sideport_mclk; + fixed20_12 igp_system_mclk; +@@ -707,12 +742,28 @@ struct radeon_pm { + struct radeon_power_state power_state[8]; + /* number of valid power states */ + int num_power_states; +- struct radeon_power_state *current_power_state; +- struct radeon_pm_clock_info *current_clock_mode; +- struct radeon_power_state *requested_power_state; +- struct radeon_pm_clock_info *requested_clock_mode; +- struct radeon_power_state *default_power_state; ++ int 
current_power_state_index; ++ int current_clock_mode_index; ++ int requested_power_state_index; ++ int requested_clock_mode_index; ++ int default_power_state_index; ++ u32 current_sclk; ++ u32 current_mclk; ++ u32 current_vddc; + struct radeon_i2c_chan *i2c_bus; ++ /* selected pm method */ ++ enum radeon_pm_method pm_method; ++ /* dynpm power management */ ++ struct delayed_work dynpm_idle_work; ++ enum radeon_dynpm_state dynpm_state; ++ enum radeon_dynpm_action dynpm_planned_action; ++ unsigned long dynpm_action_timeout; ++ bool dynpm_can_upclock; ++ bool dynpm_can_downclock; ++ /* profile-based power management */ ++ enum radeon_pm_profile_type profile; ++ int profile_index; ++ struct radeon_pm_profile profiles[PM_PROFILE_MAX]; + }; + + +@@ -746,7 +797,8 @@ struct radeon_asic { + int (*resume)(struct radeon_device *rdev); + int (*suspend)(struct radeon_device *rdev); + void (*vga_set_state)(struct radeon_device *rdev, bool state); +- int (*gpu_reset)(struct radeon_device *rdev); ++ bool (*gpu_is_lockup)(struct radeon_device *rdev); ++ int (*asic_reset)(struct radeon_device *rdev); + void (*gart_tlb_flush)(struct radeon_device *rdev); + int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); + int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); +@@ -799,44 +851,84 @@ struct radeon_asic { + * through ring. 
+ */ + void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); ++ bool (*gui_idle)(struct radeon_device *rdev); ++ /* power management */ ++ void (*pm_misc)(struct radeon_device *rdev); ++ void (*pm_prepare)(struct radeon_device *rdev); ++ void (*pm_finish)(struct radeon_device *rdev); ++ void (*pm_init_profile)(struct radeon_device *rdev); ++ void (*pm_get_dynpm_state)(struct radeon_device *rdev); + }; + + /* + * Asic structures + */ ++struct r100_gpu_lockup { ++ unsigned long last_jiffies; ++ u32 last_cp_rptr; ++}; ++ + struct r100_asic { +- const unsigned *reg_safe_bm; +- unsigned reg_safe_bm_size; +- u32 hdp_cntl; ++ const unsigned *reg_safe_bm; ++ unsigned reg_safe_bm_size; ++ u32 hdp_cntl; ++ struct r100_gpu_lockup lockup; + }; + + struct r300_asic { +- const unsigned *reg_safe_bm; +- unsigned reg_safe_bm_size; +- u32 resync_scratch; +- u32 hdp_cntl; ++ const unsigned *reg_safe_bm; ++ unsigned reg_safe_bm_size; ++ u32 resync_scratch; ++ u32 hdp_cntl; ++ struct r100_gpu_lockup lockup; + }; + + struct r600_asic { +- unsigned max_pipes; +- unsigned max_tile_pipes; +- unsigned max_simds; +- unsigned max_backends; +- unsigned max_gprs; +- unsigned max_threads; +- unsigned max_stack_entries; +- unsigned max_hw_contexts; +- unsigned max_gs_threads; +- unsigned sx_max_export_size; +- unsigned sx_max_export_pos_size; +- unsigned sx_max_export_smx_size; +- unsigned sq_num_cf_insts; +- unsigned tiling_nbanks; +- unsigned tiling_npipes; +- unsigned tiling_group_size; ++ unsigned max_pipes; ++ unsigned max_tile_pipes; ++ unsigned max_simds; ++ unsigned max_backends; ++ unsigned max_gprs; ++ unsigned max_threads; ++ unsigned max_stack_entries; ++ unsigned max_hw_contexts; ++ unsigned max_gs_threads; ++ unsigned sx_max_export_size; ++ unsigned sx_max_export_pos_size; ++ unsigned sx_max_export_smx_size; ++ unsigned sq_num_cf_insts; ++ unsigned tiling_nbanks; ++ unsigned tiling_npipes; ++ unsigned tiling_group_size; ++ struct r100_gpu_lockup lockup; + }; + 
+ struct rv770_asic { ++ unsigned max_pipes; ++ unsigned max_tile_pipes; ++ unsigned max_simds; ++ unsigned max_backends; ++ unsigned max_gprs; ++ unsigned max_threads; ++ unsigned max_stack_entries; ++ unsigned max_hw_contexts; ++ unsigned max_gs_threads; ++ unsigned sx_max_export_size; ++ unsigned sx_max_export_pos_size; ++ unsigned sx_max_export_smx_size; ++ unsigned sq_num_cf_insts; ++ unsigned sx_num_of_sets; ++ unsigned sc_prim_fifo_size; ++ unsigned sc_hiz_tile_fifo_size; ++ unsigned sc_earlyz_tile_fifo_fize; ++ unsigned tiling_nbanks; ++ unsigned tiling_npipes; ++ unsigned tiling_group_size; ++ struct r100_gpu_lockup lockup; ++}; ++ ++struct evergreen_asic { ++ unsigned num_ses; + unsigned max_pipes; + unsigned max_tile_pipes; + unsigned max_simds; +@@ -853,7 +945,7 @@ struct rv770_asic { + unsigned sx_num_of_sets; + unsigned sc_prim_fifo_size; + unsigned sc_hiz_tile_fifo_size; +- unsigned sc_earlyz_tile_fifo_fize; ++ unsigned sc_earlyz_tile_fifo_size; + unsigned tiling_nbanks; + unsigned tiling_npipes; + unsigned tiling_group_size; +@@ -864,6 +956,7 @@ union radeon_asic_config { + struct r100_asic r100; + struct r600_asic r600; + struct rv770_asic rv770; ++ struct evergreen_asic evergreen; + }; + + /* +@@ -927,9 +1020,6 @@ struct radeon_device { + bool is_atom_bios; + uint16_t bios_header_start; + struct radeon_bo *stollen_vga_memory; +- struct fb_info *fbdev_info; +- struct radeon_bo *fbdev_rbo; +- struct radeon_framebuffer *fbdev_rfb; + /* Register mmio */ + resource_size_t rmmio_base; + resource_size_t rmmio_size; +@@ -974,6 +1064,7 @@ struct radeon_device { + struct work_struct hotplug_work; + int num_crtc; /* number of crtcs */ + struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ ++ struct mutex vram_mutex; + + /* audio stuff */ + struct timer_list audio_timer; +@@ -984,6 +1075,7 @@ struct radeon_device { + uint8_t audio_category_code; + + bool powered_down; ++ struct notifier_block acpi_nb; + }; + + int radeon_device_init(struct 
radeon_device *rdev, +@@ -1145,7 +1237,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) + #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) + #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) + #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) +-#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) ++#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev)) ++#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) + #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) + #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) + #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) +@@ -1173,9 +1266,16 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) + #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) + #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) + #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) ++#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) ++#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev)) ++#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev)) ++#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) ++#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) ++#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) + + /* Common functions */ + /* AGP */ ++extern int radeon_gpu_reset(struct radeon_device *rdev); + extern void radeon_agp_disable(struct radeon_device *rdev); + extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); + extern void radeon_gart_restore(struct radeon_device *rdev); +@@ -1200,6 +1300,8 @@ extern int radeon_resume_kms(struct drm_device *dev); + extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); + + /* 
r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ ++extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp); ++extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp); + + /* rv200,rv250,rv280 */ + extern void r200_set_safe_registers(struct radeon_device *rdev); +@@ -1260,6 +1362,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, + extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); + extern bool r600_card_posted(struct radeon_device *rdev); + extern void r600_cp_stop(struct radeon_device *rdev); ++extern int r600_cp_start(struct radeon_device *rdev); + extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); + extern int r600_cp_resume(struct radeon_device *rdev); + extern void r600_cp_fini(struct radeon_device *rdev); +@@ -1276,29 +1379,39 @@ extern void r600_scratch_init(struct radeon_device *rdev); + extern int r600_blit_init(struct radeon_device *rdev); + extern void r600_blit_fini(struct radeon_device *rdev); + extern int r600_init_microcode(struct radeon_device *rdev); +-extern int r600_gpu_reset(struct radeon_device *rdev); ++extern int r600_asic_reset(struct radeon_device *rdev); + /* r600 irq */ + extern int r600_irq_init(struct radeon_device *rdev); + extern void r600_irq_fini(struct radeon_device *rdev); + extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); + extern int r600_irq_set(struct radeon_device *rdev); + extern void r600_irq_suspend(struct radeon_device *rdev); ++extern void r600_disable_interrupts(struct radeon_device *rdev); ++extern void r600_rlc_stop(struct radeon_device *rdev); + /* r600 audio */ + extern int r600_audio_init(struct radeon_device *rdev); + extern int r600_audio_tmds_index(struct drm_encoder *encoder); + extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); ++extern int r600_audio_channels(struct radeon_device 
*rdev); ++extern int r600_audio_bits_per_sample(struct radeon_device *rdev); ++extern int r600_audio_rate(struct radeon_device *rdev); ++extern uint8_t r600_audio_status_bits(struct radeon_device *rdev); ++extern uint8_t r600_audio_category_code(struct radeon_device *rdev); ++extern void r600_audio_schedule_polling(struct radeon_device *rdev); ++extern void r600_audio_enable_polling(struct drm_encoder *encoder); ++extern void r600_audio_disable_polling(struct drm_encoder *encoder); + extern void r600_audio_fini(struct radeon_device *rdev); + extern void r600_hdmi_init(struct drm_encoder *encoder); + extern void r600_hdmi_enable(struct drm_encoder *encoder); + extern void r600_hdmi_disable(struct drm_encoder *encoder); + extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); + extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); +-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, +- int channels, +- int rate, +- int bps, +- uint8_t status_bits, +- uint8_t category_code); ++extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); ++ ++extern void r700_cp_stop(struct radeon_device *rdev); ++extern void r700_cp_fini(struct radeon_device *rdev); ++extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); ++extern int evergreen_irq_set(struct radeon_device *rdev); + + /* evergreen */ + struct evergreen_mc_save { +diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c +index 28e473f..f40dfb7 100644 +--- a/drivers/gpu/drm/radeon/radeon_agp.c ++++ b/drivers/gpu/drm/radeon/radeon_agp.c +@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev) + } + #endif + } ++ ++void radeon_agp_suspend(struct radeon_device *rdev) ++{ ++ radeon_agp_fini(rdev); ++} +diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c +index a4b4bc9..87f7e2c 100644 +--- a/drivers/gpu/drm/radeon/radeon_asic.c ++++ 
b/drivers/gpu/drm/radeon/radeon_asic.c +@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = { + .suspend = &r100_suspend, + .resume = &r100_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &r100_gpu_reset, ++ .gpu_is_lockup = &r100_gpu_is_lockup, ++ .asic_reset = &r100_asic_reset, + .gart_tlb_flush = &r100_pci_gart_tlb_flush, + .gart_set_page = &r100_pci_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -164,6 +165,12 @@ static struct radeon_asic r100_asic = { + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &r100_pm_misc, ++ .pm_prepare = &r100_pm_prepare, ++ .pm_finish = &r100_pm_finish, ++ .pm_init_profile = &r100_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic r200_asic = { +@@ -172,7 +179,8 @@ static struct radeon_asic r200_asic = { + .suspend = &r100_suspend, + .resume = &r100_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &r100_gpu_reset, ++ .gpu_is_lockup = &r100_gpu_is_lockup, ++ .asic_reset = &r100_asic_reset, + .gart_tlb_flush = &r100_pci_gart_tlb_flush, + .gart_set_page = &r100_pci_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -201,6 +209,12 @@ static struct radeon_asic r200_asic = { + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &r100_pm_misc, ++ .pm_prepare = &r100_pm_prepare, ++ .pm_finish = &r100_pm_finish, ++ .pm_init_profile = &r100_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic r300_asic = { +@@ -209,7 +223,8 @@ static struct radeon_asic r300_asic = { + .suspend = &r300_suspend, + .resume = &r300_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &r300_gpu_reset, ++ .gpu_is_lockup = &r300_gpu_is_lockup, ++ .asic_reset = &r300_asic_reset, + .gart_tlb_flush = 
&r100_pci_gart_tlb_flush, + .gart_set_page = &r100_pci_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -239,6 +254,12 @@ static struct radeon_asic r300_asic = { + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &r100_pm_misc, ++ .pm_prepare = &r100_pm_prepare, ++ .pm_finish = &r100_pm_finish, ++ .pm_init_profile = &r100_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic r300_asic_pcie = { +@@ -247,7 +268,8 @@ static struct radeon_asic r300_asic_pcie = { + .suspend = &r300_suspend, + .resume = &r300_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &r300_gpu_reset, ++ .gpu_is_lockup = &r300_gpu_is_lockup, ++ .asic_reset = &r300_asic_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -276,6 +298,12 @@ static struct radeon_asic r300_asic_pcie = { + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &r100_pm_misc, ++ .pm_prepare = &r100_pm_prepare, ++ .pm_finish = &r100_pm_finish, ++ .pm_init_profile = &r100_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic r420_asic = { +@@ -284,7 +312,8 @@ static struct radeon_asic r420_asic = { + .suspend = &r420_suspend, + .resume = &r420_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &r300_gpu_reset, ++ .gpu_is_lockup = &r300_gpu_is_lockup, ++ .asic_reset = &r300_asic_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -314,6 +343,12 @@ static struct radeon_asic r420_asic = { + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = 
&r100_pm_misc, ++ .pm_prepare = &r100_pm_prepare, ++ .pm_finish = &r100_pm_finish, ++ .pm_init_profile = &r420_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic rs400_asic = { +@@ -322,7 +357,8 @@ static struct radeon_asic rs400_asic = { + .suspend = &rs400_suspend, + .resume = &rs400_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &r300_gpu_reset, ++ .gpu_is_lockup = &r300_gpu_is_lockup, ++ .asic_reset = &r300_asic_reset, + .gart_tlb_flush = &rs400_gart_tlb_flush, + .gart_set_page = &rs400_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -352,6 +388,12 @@ static struct radeon_asic rs400_asic = { + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &r100_pm_misc, ++ .pm_prepare = &r100_pm_prepare, ++ .pm_finish = &r100_pm_finish, ++ .pm_init_profile = &r100_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic rs600_asic = { +@@ -360,7 +402,8 @@ static struct radeon_asic rs600_asic = { + .suspend = &rs600_suspend, + .resume = &rs600_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &r300_gpu_reset, ++ .gpu_is_lockup = &r300_gpu_is_lockup, ++ .asic_reset = &rs600_asic_reset, + .gart_tlb_flush = &rs600_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -390,6 +433,12 @@ static struct radeon_asic rs600_asic = { + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &rs600_pm_misc, ++ .pm_prepare = &rs600_pm_prepare, ++ .pm_finish = &rs600_pm_finish, ++ .pm_init_profile = &r420_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic rs690_asic = { +@@ -398,7 +447,8 @@ static struct radeon_asic rs690_asic = { + .suspend = &rs690_suspend, + .resume = 
&rs690_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &r300_gpu_reset, ++ .gpu_is_lockup = &r300_gpu_is_lockup, ++ .asic_reset = &rs600_asic_reset, + .gart_tlb_flush = &rs400_gart_tlb_flush, + .gart_set_page = &rs400_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -428,6 +478,12 @@ static struct radeon_asic rs690_asic = { + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &rs600_pm_misc, ++ .pm_prepare = &rs600_pm_prepare, ++ .pm_finish = &rs600_pm_finish, ++ .pm_init_profile = &r420_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic rv515_asic = { +@@ -436,7 +492,8 @@ static struct radeon_asic rv515_asic = { + .suspend = &rv515_suspend, + .resume = &rv515_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &rv515_gpu_reset, ++ .gpu_is_lockup = &r300_gpu_is_lockup, ++ .asic_reset = &rs600_asic_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -466,6 +523,12 @@ static struct radeon_asic rv515_asic = { + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &rs600_pm_misc, ++ .pm_prepare = &rs600_pm_prepare, ++ .pm_finish = &rs600_pm_finish, ++ .pm_init_profile = &r420_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic r520_asic = { +@@ -474,7 +537,8 @@ static struct radeon_asic r520_asic = { + .suspend = &rv515_suspend, + .resume = &r520_resume, + .vga_set_state = &r100_vga_set_state, +- .gpu_reset = &rv515_gpu_reset, ++ .gpu_is_lockup = &r300_gpu_is_lockup, ++ .asic_reset = &rs600_asic_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, +@@ -504,6 +568,12 @@ static struct 
radeon_asic r520_asic = { + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, + .ioctl_wait_idle = NULL, ++ .gui_idle = &r100_gui_idle, ++ .pm_misc = &rs600_pm_misc, ++ .pm_prepare = &rs600_pm_prepare, ++ .pm_finish = &rs600_pm_finish, ++ .pm_init_profile = &r420_pm_init_profile, ++ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + }; + + static struct radeon_asic r600_asic = { +@@ -513,7 +583,8 @@ static struct radeon_asic r600_asic = { + .resume = &r600_resume, + .cp_commit = &r600_cp_commit, + .vga_set_state = &r600_vga_set_state, +- .gpu_reset = &r600_gpu_reset, ++ .gpu_is_lockup = &r600_gpu_is_lockup, ++ .asic_reset = &r600_asic_reset, + .gart_tlb_flush = &r600_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, +@@ -541,6 +612,12 @@ static struct radeon_asic r600_asic = { + .hpd_sense = &r600_hpd_sense, + .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, ++ .gui_idle = &r600_gui_idle, ++ .pm_misc = &r600_pm_misc, ++ .pm_prepare = &rs600_pm_prepare, ++ .pm_finish = &rs600_pm_finish, ++ .pm_init_profile = &r600_pm_init_profile, ++ .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + }; + + static struct radeon_asic rs780_asic = { +@@ -549,8 +626,9 @@ static struct radeon_asic rs780_asic = { + .suspend = &r600_suspend, + .resume = &r600_resume, + .cp_commit = &r600_cp_commit, ++ .gpu_is_lockup = &r600_gpu_is_lockup, + .vga_set_state = &r600_vga_set_state, +- .gpu_reset = &r600_gpu_reset, ++ .asic_reset = &r600_asic_reset, + .gart_tlb_flush = &r600_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, +@@ -578,6 +656,12 @@ static struct radeon_asic rs780_asic = { + .hpd_sense = &r600_hpd_sense, + .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, ++ .gui_idle = &r600_gui_idle, ++ .pm_misc = &r600_pm_misc, ++ .pm_prepare = &rs600_pm_prepare, ++ .pm_finish = &rs600_pm_finish, ++ 
.pm_init_profile = &rs780_pm_init_profile, ++ .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + }; + + static struct radeon_asic rv770_asic = { +@@ -586,7 +670,8 @@ static struct radeon_asic rv770_asic = { + .suspend = &rv770_suspend, + .resume = &rv770_resume, + .cp_commit = &r600_cp_commit, +- .gpu_reset = &rv770_gpu_reset, ++ .asic_reset = &r600_asic_reset, ++ .gpu_is_lockup = &r600_gpu_is_lockup, + .vga_set_state = &r600_vga_set_state, + .gart_tlb_flush = &r600_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, +@@ -615,6 +700,12 @@ static struct radeon_asic rv770_asic = { + .hpd_sense = &r600_hpd_sense, + .hpd_set_polarity = &r600_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, ++ .gui_idle = &r600_gui_idle, ++ .pm_misc = &rv770_pm_misc, ++ .pm_prepare = &rs600_pm_prepare, ++ .pm_finish = &rs600_pm_finish, ++ .pm_init_profile = &r600_pm_init_profile, ++ .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + }; + + static struct radeon_asic evergreen_asic = { +@@ -622,18 +713,19 @@ static struct radeon_asic evergreen_asic = { + .fini = &evergreen_fini, + .suspend = &evergreen_suspend, + .resume = &evergreen_resume, +- .cp_commit = NULL, +- .gpu_reset = &evergreen_gpu_reset, ++ .cp_commit = &r600_cp_commit, ++ .gpu_is_lockup = &evergreen_gpu_is_lockup, ++ .asic_reset = &evergreen_asic_reset, + .vga_set_state = &r600_vga_set_state, +- .gart_tlb_flush = &r600_pcie_gart_tlb_flush, ++ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, +- .ring_test = NULL, +- .ring_ib_execute = NULL, +- .irq_set = NULL, +- .irq_process = NULL, +- .get_vblank_counter = NULL, +- .fence_ring_emit = NULL, +- .cs_parse = NULL, ++ .ring_test = &r600_ring_test, ++ .ring_ib_execute = &r600_ring_ib_execute, ++ .irq_set = &evergreen_irq_set, ++ .irq_process = &evergreen_irq_process, ++ .get_vblank_counter = &evergreen_get_vblank_counter, ++ .fence_ring_emit = &r600_fence_ring_emit, ++ .cs_parse = &evergreen_cs_parse, + .copy_blit = 
NULL, + .copy_dma = NULL, + .copy = NULL, +@@ -650,6 +742,12 @@ static struct radeon_asic evergreen_asic = { + .hpd_fini = &evergreen_hpd_fini, + .hpd_sense = &evergreen_hpd_sense, + .hpd_set_polarity = &evergreen_hpd_set_polarity, ++ .gui_idle = &r600_gui_idle, ++ .pm_misc = &evergreen_pm_misc, ++ .pm_prepare = &evergreen_pm_prepare, ++ .pm_finish = &evergreen_pm_finish, ++ .pm_init_profile = &r600_pm_init_profile, ++ .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + }; + + int radeon_asic_init(struct radeon_device *rdev) +diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h +index a0b8280..c0bbaa6 100644 +--- a/drivers/gpu/drm/radeon/radeon_asic.h ++++ b/drivers/gpu/drm/radeon/radeon_asic.h +@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev); + uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); + void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); + void r100_vga_set_state(struct radeon_device *rdev, bool state); +-int r100_gpu_reset(struct radeon_device *rdev); ++bool r100_gpu_is_lockup(struct radeon_device *rdev); ++int r100_asic_reset(struct radeon_device *rdev); + u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); + void r100_pci_gart_tlb_flush(struct radeon_device *rdev); + int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev); + void r100_wb_disable(struct radeon_device *rdev); + void r100_wb_fini(struct radeon_device *rdev); + int r100_wb_init(struct radeon_device *rdev); +-void r100_hdp_reset(struct radeon_device *rdev); +-int r100_rb2d_reset(struct radeon_device *rdev); + int r100_cp_reset(struct radeon_device *rdev); + void r100_vga_render_disable(struct radeon_device *rdev); + int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, +@@ -126,6 +125,13 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, + unsigned idx); + void 
r100_enable_bm(struct radeon_device *rdev); + void r100_set_common_regs(struct radeon_device *rdev); ++void r100_bm_disable(struct radeon_device *rdev); ++extern bool r100_gui_idle(struct radeon_device *rdev); ++extern void r100_pm_misc(struct radeon_device *rdev); ++extern void r100_pm_prepare(struct radeon_device *rdev); ++extern void r100_pm_finish(struct radeon_device *rdev); ++extern void r100_pm_init_profile(struct radeon_device *rdev); ++extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); + + /* + * r200,rv250,rs300,rv280 +@@ -134,7 +140,7 @@ extern int r200_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_pages, +- struct radeon_fence *fence); ++ struct radeon_fence *fence); + + /* + * r300,r350,rv350,rv380 +@@ -143,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev); + extern void r300_fini(struct radeon_device *rdev); + extern int r300_suspend(struct radeon_device *rdev); + extern int r300_resume(struct radeon_device *rdev); +-extern int r300_gpu_reset(struct radeon_device *rdev); ++extern bool r300_gpu_is_lockup(struct radeon_device *rdev); ++extern int r300_asic_reset(struct radeon_device *rdev); + extern void r300_ring_start(struct radeon_device *rdev); + extern void r300_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence); +@@ -162,6 +169,7 @@ extern int r420_init(struct radeon_device *rdev); + extern void r420_fini(struct radeon_device *rdev); + extern int r420_suspend(struct radeon_device *rdev); + extern int r420_resume(struct radeon_device *rdev); ++extern void r420_pm_init_profile(struct radeon_device *rdev); + + /* + * rs400,rs480 +@@ -178,6 +186,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); + /* + * rs600. 
+ */ ++extern int rs600_asic_reset(struct radeon_device *rdev); + extern int rs600_init(struct radeon_device *rdev); + extern void rs600_fini(struct radeon_device *rdev); + extern int rs600_suspend(struct radeon_device *rdev); +@@ -195,6 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev); + bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); + void rs600_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd); ++extern void rs600_pm_misc(struct radeon_device *rdev); ++extern void rs600_pm_prepare(struct radeon_device *rdev); ++extern void rs600_pm_finish(struct radeon_device *rdev); + + /* + * rs690,rs740 +@@ -212,7 +224,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev); + */ + int rv515_init(struct radeon_device *rdev); + void rv515_fini(struct radeon_device *rdev); +-int rv515_gpu_reset(struct radeon_device *rdev); + uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); + void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); + void rv515_ring_start(struct radeon_device *rdev); +@@ -252,7 +263,8 @@ int r600_copy_dma(struct radeon_device *rdev, + struct radeon_fence *fence); + int r600_irq_process(struct radeon_device *rdev); + int r600_irq_set(struct radeon_device *rdev); +-int r600_gpu_reset(struct radeon_device *rdev); ++bool r600_gpu_is_lockup(struct radeon_device *rdev); ++int r600_asic_reset(struct radeon_device *rdev); + int r600_set_surface_reg(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size); +@@ -268,6 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); + void r600_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd); + extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); ++extern bool r600_gui_idle(struct radeon_device *rdev); ++extern void r600_pm_misc(struct radeon_device *rdev); ++extern void r600_pm_init_profile(struct 
radeon_device *rdev); ++extern void rs780_pm_init_profile(struct radeon_device *rdev); ++extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); + + /* + * rv770,rv730,rv710,rv740 +@@ -276,20 +293,30 @@ int rv770_init(struct radeon_device *rdev); + void rv770_fini(struct radeon_device *rdev); + int rv770_suspend(struct radeon_device *rdev); + int rv770_resume(struct radeon_device *rdev); +-int rv770_gpu_reset(struct radeon_device *rdev); ++extern void rv770_pm_misc(struct radeon_device *rdev); + + /* + * evergreen + */ ++void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); + int evergreen_init(struct radeon_device *rdev); + void evergreen_fini(struct radeon_device *rdev); + int evergreen_suspend(struct radeon_device *rdev); + int evergreen_resume(struct radeon_device *rdev); +-int evergreen_gpu_reset(struct radeon_device *rdev); ++bool evergreen_gpu_is_lockup(struct radeon_device *rdev); ++int evergreen_asic_reset(struct radeon_device *rdev); + void evergreen_bandwidth_update(struct radeon_device *rdev); + void evergreen_hpd_init(struct radeon_device *rdev); + void evergreen_hpd_fini(struct radeon_device *rdev); + bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); + void evergreen_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd); ++u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); ++int evergreen_irq_set(struct radeon_device *rdev); ++int evergreen_irq_process(struct radeon_device *rdev); ++extern int evergreen_cs_parse(struct radeon_cs_parser *p); ++extern void evergreen_pm_misc(struct radeon_device *rdev); ++extern void evergreen_pm_prepare(struct radeon_device *rdev); ++extern void evergreen_pm_finish(struct radeon_device *rdev); ++ + #endif +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index 9916d82..99bd8a9 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -530,6 
+530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) + } + + /* look up gpio for ddc, hpd */ ++ ddc_bus.valid = false; ++ hpd.hpd = RADEON_HPD_NONE; + if ((le16_to_cpu(path->usDeviceTag) & + (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { + for (j = 0; j < con_obj->ucNumberOfObjects; j++) { +@@ -547,7 +549,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) + ATOM_I2C_RECORD *i2c_record; + ATOM_HPD_INT_RECORD *hpd_record; + ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; +- hpd.hpd = RADEON_HPD_NONE; + + while (record->ucRecordType > 0 + && record-> +@@ -585,13 +586,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) + break; + } + } +- } else { +- hpd.hpd = RADEON_HPD_NONE; +- ddc_bus.valid = false; + } + + /* needed for aux chan transactions */ +- ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0; ++ ddc_bus.hpd = hpd.hpd; + + conn_id = le16_to_cpu(path->usConnObjectId); + +@@ -682,10 +680,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct + uint8_t dac; + union atom_supported_devices *supported_devices; + int i, j, max_device; +- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; ++ struct bios_connector *bios_connectors; ++ size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; + +- if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) ++ bios_connectors = kzalloc(bc_size, GFP_KERNEL); ++ if (!bios_connectors) ++ return false; ++ ++ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, ++ &data_offset)) { ++ kfree(bios_connectors); + return false; ++ } + + supported_devices = + (union atom_supported_devices *)(ctx->bios + data_offset); +@@ -853,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct + + radeon_link_encoder_connector(dev); + ++ kfree(bios_connectors); + return true; + } + +@@ -1174,7 +1181,7 @@ struct radeon_encoder_atom_dig 
*radeon_atombios_get_lvds_info(struct + lvds->native_mode.vtotal = lvds->native_mode.vdisplay + + le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); + lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + +- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); ++ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); + lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); + lvds->panel_pwr_delay = +@@ -1442,26 +1449,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) + + static const char *thermal_controller_names[] = { + "NONE", +- "LM63", +- "ADM1032", +- "ADM1030", +- "MUA6649", +- "LM64", +- "F75375", +- "ASC7512", ++ "lm63", ++ "adm1032", ++ "adm1030", ++ "max6649", ++ "lm64", ++ "f75375", ++ "asc7xxx", + }; + + static const char *pp_lib_thermal_controller_names[] = { + "NONE", +- "LM63", +- "ADM1032", +- "ADM1030", +- "MUA6649", +- "LM64", +- "F75375", ++ "lm63", ++ "adm1032", ++ "adm1030", ++ "max6649", ++ "lm64", ++ "f75375", + "RV6xx", + "RV770", +- "ADT7473", ++ "adt7473", ++ "External GPIO", ++ "Evergreen", ++ "adt7473 with internal", ++ + }; + + union power_info { +@@ -1485,7 +1496,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + int state_index = 0, mode_index = 0; + struct radeon_i2c_bus_rec i2c_bus; + +- rdev->pm.default_power_state = NULL; ++ rdev->pm.default_power_state_index = -1; + + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { +@@ -1498,10 +1509,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + power_info->info.ucOverdriveControllerAddress >> 1); + i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); + rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); ++ if (rdev->pm.i2c_bus) { ++ struct i2c_board_info info = { }; ++ const char *name = thermal_controller_names[power_info->info. 
++ ucOverdriveThermalController]; ++ info.addr = power_info->info.ucOverdriveControllerAddress >> 1; ++ strlcpy(info.type, name, sizeof(info.type)); ++ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); ++ } + } + num_modes = power_info->info.ucNumOfPowerModeEntries; + if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) + num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; ++ /* last mode is usually default, array is low to high */ + for (i = 0; i < num_modes; i++) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + switch (frev) { +@@ -1515,16 +1535,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; +- /* skip overclock modes for now */ +- if ((rdev->pm.power_state[state_index].clock_info[0].mclk > +- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || +- (rdev->pm.power_state[state_index].clock_info[0].sclk > +- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) +- continue; +- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = ++ rdev->pm.power_state[state_index].pcie_lanes = + power_info->info.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); +- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { ++ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || ++ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = +@@ -1542,6 +1557,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; + } ++ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ rdev->pm.power_state[state_index].misc = misc; + /* 
order matters! */ + if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) + rdev->pm.power_state[state_index].type = +@@ -1555,15 +1572,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; +- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) ++ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; ++ rdev->pm.power_state[state_index].flags &= ++ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ } + if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; +- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; ++ rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; ++ rdev->pm.power_state[state_index].flags &= ++ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ } else if (state_index == 0) { ++ rdev->pm.power_state[state_index].clock_info[0].flags |= ++ RADEON_PM_MODE_NO_DISPLAY; + } + state_index++; + break; +@@ -1577,17 +1602,12 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; +- /* skip overclock modes for now */ +- if ((rdev->pm.power_state[state_index].clock_info[0].mclk > +- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || +- (rdev->pm.power_state[state_index].clock_info[0].sclk > +- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) +- continue; +- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = ++ rdev->pm.power_state[state_index].pcie_lanes = + power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); + misc2 = 
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); +- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { ++ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || ++ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = +@@ -1605,6 +1625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; + } ++ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ rdev->pm.power_state[state_index].misc = misc; ++ rdev->pm.power_state[state_index].misc2 = misc2; + /* order matters! */ + if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) + rdev->pm.power_state[state_index].type = +@@ -1618,18 +1641,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; +- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) ++ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; ++ rdev->pm.power_state[state_index].flags &= ++ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ } + if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; ++ if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT) ++ rdev->pm.power_state[state_index].flags &= ++ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; +- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; ++ rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].default_clock_mode = + 
&rdev->pm.power_state[state_index].clock_info[0]; ++ rdev->pm.power_state[state_index].flags &= ++ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ } else if (state_index == 0) { ++ rdev->pm.power_state[state_index].clock_info[0].flags |= ++ RADEON_PM_MODE_NO_DISPLAY; + } + state_index++; + break; +@@ -1643,17 +1677,12 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; +- /* skip overclock modes for now */ +- if ((rdev->pm.power_state[state_index].clock_info[0].mclk > +- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || +- (rdev->pm.power_state[state_index].clock_info[0].sclk > +- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) +- continue; +- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = ++ rdev->pm.power_state[state_index].pcie_lanes = + power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); + misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); +- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { ++ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || ++ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = +@@ -1677,6 +1706,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; + } + } ++ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ rdev->pm.power_state[state_index].misc = misc; ++ rdev->pm.power_state[state_index].misc2 = misc2; + /* order matters! 
*/ + if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) + rdev->pm.power_state[state_index].type = +@@ -1690,42 +1722,89 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; +- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) ++ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; ++ rdev->pm.power_state[state_index].flags &= ++ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ } + if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; +- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; ++ rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; ++ } else if (state_index == 0) { ++ rdev->pm.power_state[state_index].clock_info[0].flags |= ++ RADEON_PM_MODE_NO_DISPLAY; + } + state_index++; + break; + } + } +- } else if (frev == 4) { ++ /* last mode is usually default */ ++ if (rdev->pm.default_power_state_index == -1) { ++ rdev->pm.power_state[state_index - 1].type = ++ POWER_STATE_TYPE_DEFAULT; ++ rdev->pm.default_power_state_index = state_index - 1; ++ rdev->pm.power_state[state_index - 1].default_clock_mode = ++ &rdev->pm.power_state[state_index - 1].clock_info[0]; ++ rdev->pm.power_state[state_index].flags &= ++ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; ++ rdev->pm.power_state[state_index].misc = 0; ++ rdev->pm.power_state[state_index].misc2 = 0; ++ } ++ } else { ++ int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); ++ uint8_t fw_frev, fw_crev; ++ uint16_t fw_data_offset, vddc = 0; ++ union firmware_info *firmware_info; ++ ATOM_PPLIB_THERMALCONTROLLER *controller 
= &power_info->info_4.sThermalController; ++ ++ if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, ++ &fw_frev, &fw_crev, &fw_data_offset)) { ++ firmware_info = ++ (union firmware_info *)(mode_info->atom_context->bios + ++ fw_data_offset); ++ vddc = firmware_info->info_14.usBootUpVDDCVoltage; ++ } ++ + /* add the i2c bus for thermal/fan chip */ + /* no support for internal controller yet */ +- if (power_info->info_4.sThermalController.ucType > 0) { +- if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || +- (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) { ++ if (controller->ucType > 0) { ++ if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || ++ (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || ++ (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) { + DRM_INFO("Internal thermal controller %s fan control\n", +- (power_info->info_4.sThermalController.ucFanParameters & ++ (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); ++ } else if ((controller->ucType == ++ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || ++ (controller->ucType == ++ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { ++ DRM_INFO("Special thermal controller config\n"); + } else { + DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", +- pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType], +- power_info->info_4.sThermalController.ucI2cAddress >> 1, +- (power_info->info_4.sThermalController.ucFanParameters & ++ pp_lib_thermal_controller_names[controller->ucType], ++ controller->ucI2cAddress >> 1, ++ (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); +- i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine); ++ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); + rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); ++ if (rdev->pm.i2c_bus) { ++ struct i2c_board_info info = { }; ++ const char *name = pp_lib_thermal_controller_names[controller->ucType]; ++ info.addr = controller->ucI2cAddress >> 1; ++ strlcpy(info.type, name, sizeof(info.type)); ++ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); ++ } ++ + } + } ++ /* first mode is usually default, followed by low to high */ + for (i = 0; i < power_info->info_4.ucNumStates; i++) { + mode_index = 0; + power_state = (struct _ATOM_PPLIB_STATE *) +@@ -1754,14 +1833,31 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + /* skip invalid modes */ + if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) + continue; +- /* skip overclock modes for now */ +- if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > +- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN) ++ /* voltage works differently on IGPs */ ++ mode_index++; ++ } else if (ASIC_IS_DCE4(rdev)) { ++ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = ++ (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) ++ (mode_info->atom_context->bios + ++ data_offset + ++ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + ++ (power_state->ucClockStateIndices[j] * ++ power_info->info_4.ucClockInfoSize)); ++ sclk = le16_to_cpu(clock_info->usEngineClockLow); ++ sclk |= clock_info->ucEngineClockHigh << 16; ++ mclk = le16_to_cpu(clock_info->usMemoryClockLow); ++ mclk |= clock_info->ucMemoryClockHigh << 16; ++ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; ++ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; ++ /* skip invalid modes */ ++ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || ++ 
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) + continue; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + clock_info->usVDDC; ++ /* XXX usVDDCI */ + mode_index++; + } else { + struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = +@@ -1781,12 +1877,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) + continue; +- /* skip overclock modes for now */ +- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk > +- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || +- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > +- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) +- continue; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = +@@ -1798,7 +1888,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + if (mode_index) { + misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); + misc2 = le16_to_cpu(non_clock_info->usClassification); +- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = ++ rdev->pm.power_state[state_index].misc = misc; ++ rdev->pm.power_state[state_index].misc2 = misc2; ++ rdev->pm.power_state[state_index].pcie_lanes = + ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> + ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { +@@ -1815,22 +1907,46 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + POWER_STATE_TYPE_PERFORMANCE; + break; + } ++ rdev->pm.power_state[state_index].flags = 0; ++ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ++ rdev->pm.power_state[state_index].flags |= ++ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + 
if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; +- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; ++ rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; ++ /* patch the table values with the default slck/mclk from firmware info */ ++ for (j = 0; j < mode_index; j++) { ++ rdev->pm.power_state[state_index].clock_info[j].mclk = ++ rdev->clock.default_mclk; ++ rdev->pm.power_state[state_index].clock_info[j].sclk = ++ rdev->clock.default_sclk; ++ if (vddc) ++ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = ++ vddc; ++ } + } + state_index++; + } + } ++ /* if multiple clock modes, mark the lowest as no display */ ++ for (i = 0; i < state_index; i++) { ++ if (rdev->pm.power_state[i].num_clock_modes > 1) ++ rdev->pm.power_state[i].clock_info[0].flags |= ++ RADEON_PM_MODE_NO_DISPLAY; ++ } ++ /* first mode is usually default */ ++ if (rdev->pm.default_power_state_index == -1) { ++ rdev->pm.power_state[0].type = ++ POWER_STATE_TYPE_DEFAULT; ++ rdev->pm.default_power_state_index = 0; ++ rdev->pm.power_state[0].default_clock_mode = ++ &rdev->pm.power_state[0].clock_info[0]; ++ } + } + } else { +- /* XXX figure out some good default low power mode for cards w/out power tables */ +- } +- +- if (rdev->pm.default_power_state == NULL) { + /* add the default mode */ + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; +@@ -1840,18 +1956,17 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; +- if (rdev->asic->get_pcie_lanes) +- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); +- else +- 
rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; +- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; ++ rdev->pm.power_state[state_index].pcie_lanes = 16; ++ rdev->pm.default_power_state_index = state_index; ++ rdev->pm.power_state[state_index].flags = 0; + state_index++; + } ++ + rdev->pm.num_power_states = state_index; + +- rdev->pm.current_power_state = rdev->pm.default_power_state; +- rdev->pm.current_clock_mode = +- rdev->pm.default_power_state->default_clock_mode; ++ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; ++ rdev->pm.current_clock_mode_index = 0; ++ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; + } + + void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) +@@ -1907,6 +2022,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + } + ++union set_voltage { ++ struct _SET_VOLTAGE_PS_ALLOCATION alloc; ++ struct _SET_VOLTAGE_PARAMETERS v1; ++ struct _SET_VOLTAGE_PARAMETERS_V2 v2; ++}; ++ ++void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) ++{ ++ union set_voltage args; ++ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); ++ u8 frev, crev, volt_index = level; ++ ++ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) ++ return; ++ ++ switch (crev) { ++ case 1: ++ args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; ++ args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; ++ args.v1.ucVoltageIndex = volt_index; ++ break; ++ case 2: ++ args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; ++ args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; ++ args.v2.usVoltageLevel = cpu_to_le16(level); ++ break; ++ default: ++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev); ++ return; ++ } ++ ++ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 
++} ++ ++ ++ + void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) + { + struct radeon_device *rdev = dev->dev_private; +diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c +index 8ad71f7..fbba938 100644 +--- a/drivers/gpu/drm/radeon/radeon_bios.c ++++ b/drivers/gpu/drm/radeon/radeon_bios.c +@@ -85,12 +85,11 @@ static bool radeon_read_bios(struct radeon_device *rdev) + pci_unmap_rom(rdev->pdev, bios); + return false; + } +- rdev->bios = kmalloc(size, GFP_KERNEL); ++ rdev->bios = kmemdup(bios, size, GFP_KERNEL); + if (rdev->bios == NULL) { + pci_unmap_rom(rdev->pdev, bios); + return false; + } +- memcpy(rdev->bios, bios, size); + pci_unmap_rom(rdev->pdev, bios); + return true; + } +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c +index 37db8ad..1bee2f9 100644 +--- a/drivers/gpu/drm/radeon/radeon_combios.c ++++ b/drivers/gpu/drm/radeon/radeon_combios.c +@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) + { + int edid_info; + struct edid *edid; ++ unsigned char *raw; + edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); + if (!edid_info) + return false; + +- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), +- GFP_KERNEL); ++ raw = rdev->bios + edid_info; ++ edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); + if (edid == NULL) + return false; + +- memcpy((unsigned char *)edid, +- (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH); ++ memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); + + if (!drm_edid_is_valid(edid)) { + kfree(edid); +@@ -600,7 +600,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde + } + i2c.mm_i2c = false; + i2c.i2c_id = 0; +- i2c.hpd_id = 0; ++ i2c.hpd = RADEON_HPD_NONE; + + if (ddc_line) + i2c.valid = true; +@@ -1113,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder 
+ break; + + if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && +- (RBIOS16(tmp + 2) == +- lvds->native_mode.vdisplay)) { +- lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8; +- lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8; +- lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) + +- RBIOS16(tmp + 21)) * 8; +- +- lvds->native_mode.vtotal = RBIOS16(tmp + 24); +- lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff; +- lvds->native_mode.vsync_end = +- ((RBIOS16(tmp + 28) & 0xf800) >> 11) + +- (RBIOS16(tmp + 28) & 0x7ff); ++ (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { ++ lvds->native_mode.htotal = lvds->native_mode.hdisplay + ++ (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; ++ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + ++ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; ++ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + ++ (RBIOS8(tmp + 23) * 8); ++ ++ lvds->native_mode.vtotal = lvds->native_mode.vdisplay + ++ (RBIOS16(tmp + 24) - RBIOS16(tmp + 26)); ++ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + ++ ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26)); ++ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + ++ ((RBIOS16(tmp + 28) & 0xf800) >> 11); + + lvds->native_mode.clock = RBIOS16(tmp + 9) * 10; + lvds->native_mode.flags = 0; +@@ -2024,6 +2026,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) + combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); + break; + default: ++ ddc_i2c.valid = false; + break; + } + +@@ -2196,7 +2199,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) + ATOM_DEVICE_DFP1_SUPPORT); + + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); +- hpd.hpd = RADEON_HPD_NONE; ++ hpd.hpd = RADEON_HPD_1; + radeon_add_legacy_connector(dev, + 0, + ATOM_DEVICE_CRT1_SUPPORT | +@@ -2337,6 +2340,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) + if (RBIOS8(tv_info + 6) == 'T') { + if 
(radeon_apply_legacy_tv_quirks(dev)) { + hpd.hpd = RADEON_HPD_NONE; ++ ddc_i2c.valid = false; + radeon_add_legacy_encoder(dev, + radeon_get_encoder_id + (dev, +@@ -2366,7 +2370,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) + u8 rev, blocks, tmp; + int state_index = 0; + +- rdev->pm.default_power_state = NULL; ++ rdev->pm.default_power_state_index = -1; + + if (rdev->flags & RADEON_IS_MOBILITY) { + offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); +@@ -2380,17 +2384,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + goto default_mode; +- /* skip overclock modes for now */ +- if ((rdev->pm.power_state[state_index].clock_info[0].mclk > +- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || +- (rdev->pm.power_state[state_index].clock_info[0].sclk > +- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) +- goto default_mode; + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + misc = RBIOS16(offset + 0x5 + 0x0); + if (rev > 4) + misc2 = RBIOS16(offset + 0x5 + 0xe); ++ rdev->pm.power_state[state_index].misc = misc; ++ rdev->pm.power_state[state_index].misc2 = misc2; + if (misc & 0x4) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; + if (misc & 0x8) +@@ -2437,8 +2437,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) + } else + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + if (rev > 6) +- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = ++ rdev->pm.power_state[state_index].pcie_lanes = + RBIOS8(offset + 0x5 + 0x10); ++ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + state_index++; + } else { + /* XXX figure out some good default low power mode for mobility cards w/out power tables */ +@@ -2455,17 +2456,19 @@ 
default_mode: + rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; + rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; +- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; +- if (rdev->asic->get_pcie_lanes) +- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); ++ if ((state_index > 0) && ++ (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO)) ++ rdev->pm.power_state[state_index].clock_info[0].voltage = ++ rdev->pm.power_state[0].clock_info[0].voltage; + else +- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; +- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; ++ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; ++ rdev->pm.power_state[state_index].pcie_lanes = 16; ++ rdev->pm.power_state[state_index].flags = 0; ++ rdev->pm.default_power_state_index = state_index; + rdev->pm.num_power_states = state_index + 1; + +- rdev->pm.current_power_state = rdev->pm.default_power_state; +- rdev->pm.current_clock_mode = +- rdev->pm.default_power_state->default_clock_mode; ++ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; ++ rdev->pm.current_clock_mode_index = 0; + } + + void radeon_external_tmds_setup(struct drm_encoder *encoder) +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 4559a53..0c7ccc6 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev, + struct radeon_connector_atom_dig *radeon_dig_connector; + uint32_t subpixel_order = SubPixelNone; + bool shared_ddc = false; +- int ret; + + /* fixme - tv/cv/din */ + if (connector_type == DRM_MODE_CONNECTOR_Unknown) 
+@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev, + switch (connector_type) { + case DRM_MODE_CONNECTOR_VGA: + drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); + if (!radeon_connector->ddc_bus) +@@ -1088,12 +1085,11 @@ radeon_add_atom_connector(struct drm_device *dev, + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; + break; + case DRM_MODE_CONNECTOR_DVIA: + drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); + if (!radeon_connector->ddc_bus) +@@ -1113,9 +1109,7 @@ radeon_add_atom_connector(struct drm_device *dev, + radeon_dig_connector->igp_lane_info = igp_lane_info; + radeon_connector->con_priv = radeon_dig_connector; + drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); + if (!radeon_connector->ddc_bus) +@@ -1141,9 +1135,7 @@ radeon_add_atom_connector(struct drm_device *dev, + 
radeon_dig_connector->igp_lane_info = igp_lane_info; + radeon_connector->con_priv = radeon_dig_connector; + drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); + if (!radeon_connector->ddc_bus) +@@ -1163,9 +1155,7 @@ radeon_add_atom_connector(struct drm_device *dev, + radeon_dig_connector->igp_lane_info = igp_lane_info; + radeon_connector->con_priv = radeon_dig_connector; + drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); + if (i2c_bus->valid) { + /* add DP i2c bus */ + if (connector_type == DRM_MODE_CONNECTOR_eDP) +@@ -1191,9 +1181,7 @@ radeon_add_atom_connector(struct drm_device *dev, + case DRM_MODE_CONNECTOR_9PinDIN: + if (radeon_tv == 1) { + drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); + radeon_connector->dac_load_detect = true; + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, +@@ -1211,9 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev, + radeon_dig_connector->igp_lane_info = igp_lane_info; + radeon_connector->con_priv = radeon_dig_connector; + drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); +- ret = 
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); + if (!radeon_connector->ddc_bus) +@@ -1226,6 +1212,12 @@ radeon_add_atom_connector(struct drm_device *dev, + break; + } + ++ if (hpd->hpd == RADEON_HPD_NONE) { ++ if (i2c_bus->valid) ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ } else ++ connector->polled = DRM_CONNECTOR_POLL_HPD; ++ + connector->display_info.subpixel_order = subpixel_order; + drm_sysfs_connector_add(connector); + return; +@@ -1250,7 +1242,6 @@ radeon_add_legacy_connector(struct drm_device *dev, + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + uint32_t subpixel_order = SubPixelNone; +- int ret; + + /* fixme - tv/cv/din */ + if (connector_type == DRM_MODE_CONNECTOR_Unknown) +@@ -1278,9 +1269,7 @@ radeon_add_legacy_connector(struct drm_device *dev, + switch (connector_type) { + case DRM_MODE_CONNECTOR_VGA: + drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); + if (!radeon_connector->ddc_bus) +@@ -1290,12 +1279,11 @@ radeon_add_legacy_connector(struct drm_device *dev, + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; + break; + case DRM_MODE_CONNECTOR_DVIA: + drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, 
&radeon_vga_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); + if (!radeon_connector->ddc_bus) +@@ -1309,9 +1297,7 @@ radeon_add_legacy_connector(struct drm_device *dev, + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_DVID: + drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); + if (!radeon_connector->ddc_bus) +@@ -1330,9 +1316,7 @@ radeon_add_legacy_connector(struct drm_device *dev, + case DRM_MODE_CONNECTOR_9PinDIN: + if (radeon_tv == 1) { + drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); + radeon_connector->dac_load_detect = true; + /* RS400,RC410,RS480 chipset seems to report a lot + * of false positive on load detect, we haven't yet +@@ -1351,9 +1335,7 @@ radeon_add_legacy_connector(struct drm_device *dev, + break; + case DRM_MODE_CONNECTOR_LVDS: + drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); +- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); +- if (ret) +- goto failed; ++ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); + if (i2c_bus->valid) { + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); + if 
(!radeon_connector->ddc_bus) +@@ -1366,6 +1348,11 @@ radeon_add_legacy_connector(struct drm_device *dev, + break; + } + ++ if (hpd->hpd == RADEON_HPD_NONE) { ++ if (i2c_bus->valid) ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ } else ++ connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->display_info.subpixel_order = subpixel_order; + drm_sysfs_connector_add(connector); + return; +diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c +index f9b0fe0..ae0fb73 100644 +--- a/drivers/gpu/drm/radeon/radeon_cs.c ++++ b/drivers/gpu/drm/radeon/radeon_cs.c +@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) + int r; + + mutex_lock(&rdev->cs_mutex); +- if (rdev->gpu_lockup) { +- mutex_unlock(&rdev->cs_mutex); +- return -EINVAL; +- } + /* initialize parser */ + memset(&parser, 0, sizeof(struct radeon_cs_parser)); + parser.filp = filp; +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c +index 7b629e3..f10faed 100644 +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev) + sclk = radeon_get_engine_clock(rdev); + mclk = rdev->clock.default_mclk; + +- a.full = rfixed_const(100); +- rdev->pm.sclk.full = rfixed_const(sclk); +- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); +- rdev->pm.mclk.full = rfixed_const(mclk); +- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); ++ a.full = dfixed_const(100); ++ rdev->pm.sclk.full = dfixed_const(sclk); ++ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); ++ rdev->pm.mclk.full = dfixed_const(mclk); ++ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); + +- a.full = rfixed_const(16); ++ a.full = dfixed_const(16); + /* core_bandwidth = sclk(Mhz) * 16 */ +- rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); ++ rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); + } else 
{ + sclk = radeon_get_engine_clock(rdev); + mclk = radeon_get_memory_clock(rdev); + +- a.full = rfixed_const(100); +- rdev->pm.sclk.full = rfixed_const(sclk); +- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); +- rdev->pm.mclk.full = rfixed_const(mclk); +- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); ++ a.full = dfixed_const(100); ++ rdev->pm.sclk.full = dfixed_const(sclk); ++ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); ++ rdev->pm.mclk.full = dfixed_const(mclk); ++ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); + } + } + +@@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero + /* don't suspend or resume card normally */ + rdev->powered_down = false; + radeon_resume_kms(dev); ++ drm_kms_helper_poll_enable(dev); + } else { + printk(KERN_INFO "radeon: switched off\n"); ++ drm_kms_helper_poll_disable(dev); + radeon_suspend_kms(dev, pmm); + /* don't suspend or resume card normally */ + rdev->powered_down = true; +@@ -599,9 +601,11 @@ int radeon_device_init(struct radeon_device *rdev, + spin_lock_init(&rdev->ih.lock); + mutex_init(&rdev->gem.mutex); + mutex_init(&rdev->pm.mutex); ++ mutex_init(&rdev->vram_mutex); + rwlock_init(&rdev->fence_drv.lock); + INIT_LIST_HEAD(&rdev->gem.objects); + init_waitqueue_head(&rdev->irq.vblank_queue); ++ init_waitqueue_head(&rdev->irq.idle_queue); + + /* setup workqueue */ + rdev->wq = create_workqueue("radeon"); +@@ -671,7 +675,7 @@ int radeon_device_init(struct radeon_device *rdev, + /* Acceleration not working on AGP card try again + * with fallback to PCI or PCIE GART + */ +- radeon_gpu_reset(rdev); ++ radeon_asic_reset(rdev); + radeon_fini(rdev); + radeon_agp_disable(rdev); + r = radeon_init(rdev); +@@ -691,6 +695,8 @@ void radeon_device_fini(struct radeon_device *rdev) + { + DRM_INFO("radeon: finishing device.\n"); + rdev->shutdown = true; ++ /* evict vram memory */ ++ radeon_bo_evict_vram(rdev); + radeon_fini(rdev); + destroy_workqueue(rdev->wq); + 
vga_switcheroo_unregister_client(rdev->pdev); +@@ -707,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) + { + struct radeon_device *rdev; + struct drm_crtc *crtc; ++ struct drm_connector *connector; + int r; + + if (dev == NULL || dev->dev_private == NULL) { +@@ -719,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) + + if (rdev->powered_down) + return 0; ++ ++ /* turn off display hw */ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); ++ } ++ + /* unpin the front buffers */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); +@@ -728,9 +741,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) + continue; + } + robj = rfb->obj->driver_private; +- if (robj != rdev->fbdev_rbo) { ++ /* don't unpin kernel fb objects */ ++ if (!radeon_fbdev_robj_is_fb(rdev, robj)) { + r = radeon_bo_reserve(robj, false); +- if (unlikely(r == 0)) { ++ if (r == 0) { + radeon_bo_unpin(robj); + radeon_bo_unreserve(robj); + } +@@ -743,11 +757,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) + + radeon_save_bios_scratch_regs(rdev); + ++ radeon_pm_suspend(rdev); + radeon_suspend(rdev); + radeon_hpd_fini(rdev); + /* evict remaining vram memory */ + radeon_bo_evict_vram(rdev); + ++ radeon_agp_suspend(rdev); ++ + pci_save_state(dev->pdev); + if (state.event == PM_EVENT_SUSPEND) { + /* Shut down the device */ +@@ -755,7 +772,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) + pci_set_power_state(dev->pdev, PCI_D3hot); + } + acquire_console_sem(); +- fb_set_suspend(rdev->fbdev_info, 1); ++ radeon_fbdev_set_suspend(rdev, 1); + release_console_sem(); + return 0; + } +@@ -778,8 +795,9 @@ int radeon_resume_kms(struct drm_device *dev) + /* resume AGP if in use */ + radeon_agp_resume(rdev); + radeon_resume(rdev); ++ 
radeon_pm_resume(rdev); + radeon_restore_bios_scratch_regs(rdev); +- fb_set_suspend(rdev->fbdev_info, 0); ++ radeon_fbdev_set_suspend(rdev, 0); + release_console_sem(); + + /* reset hpd state */ +@@ -789,6 +807,26 @@ int radeon_resume_kms(struct drm_device *dev) + return 0; + } + ++int radeon_gpu_reset(struct radeon_device *rdev) ++{ ++ int r; ++ ++ radeon_save_bios_scratch_regs(rdev); ++ radeon_suspend(rdev); ++ ++ r = radeon_asic_reset(rdev); ++ if (!r) { ++ dev_info(rdev->dev, "GPU reset succeed\n"); ++ radeon_resume(rdev); ++ radeon_restore_bios_scratch_regs(rdev); ++ drm_helper_resume_force_mode(rdev->ddev); ++ return 0; ++ } ++ /* bad news, how to tell it to userspace ? */ ++ dev_info(rdev->dev, "GPU reset failed\n"); ++ return r; ++} ++ + + /* + * Debugfs +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +index bb1c122..c73444a 100644 +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -284,8 +284,7 @@ static const char *connector_names[15] = { + "eDP", + }; + +-static const char *hpd_names[7] = { +- "NONE", ++static const char *hpd_names[6] = { + "HPD1", + "HPD2", + "HPD3", +@@ -633,37 +632,37 @@ calc_fb_div(struct radeon_pll *pll, + + vco_freq = freq * post_div; + /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ +- a.full = rfixed_const(pll->reference_freq); +- feedback_divider.full = rfixed_const(vco_freq); +- feedback_divider.full = rfixed_div(feedback_divider, a); +- a.full = rfixed_const(ref_div); +- feedback_divider.full = rfixed_mul(feedback_divider, a); ++ a.full = dfixed_const(pll->reference_freq); ++ feedback_divider.full = dfixed_const(vco_freq); ++ feedback_divider.full = dfixed_div(feedback_divider, a); ++ a.full = dfixed_const(ref_div); ++ feedback_divider.full = dfixed_mul(feedback_divider, a); + + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { + /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ +- a.full = 
rfixed_const(10); +- feedback_divider.full = rfixed_mul(feedback_divider, a); +- feedback_divider.full += rfixed_const_half(0); +- feedback_divider.full = rfixed_floor(feedback_divider); +- feedback_divider.full = rfixed_div(feedback_divider, a); ++ a.full = dfixed_const(10); ++ feedback_divider.full = dfixed_mul(feedback_divider, a); ++ feedback_divider.full += dfixed_const_half(0); ++ feedback_divider.full = dfixed_floor(feedback_divider); ++ feedback_divider.full = dfixed_div(feedback_divider, a); + + /* *fb_div = floor(feedback_divider); */ +- a.full = rfixed_floor(feedback_divider); +- *fb_div = rfixed_trunc(a); ++ a.full = dfixed_floor(feedback_divider); ++ *fb_div = dfixed_trunc(a); + /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ +- a.full = rfixed_const(10); +- b.full = rfixed_mul(feedback_divider, a); ++ a.full = dfixed_const(10); ++ b.full = dfixed_mul(feedback_divider, a); + +- feedback_divider.full = rfixed_floor(feedback_divider); +- feedback_divider.full = rfixed_mul(feedback_divider, a); ++ feedback_divider.full = dfixed_floor(feedback_divider); ++ feedback_divider.full = dfixed_mul(feedback_divider, a); + feedback_divider.full = b.full - feedback_divider.full; +- *fb_div_frac = rfixed_trunc(feedback_divider); ++ *fb_div_frac = dfixed_trunc(feedback_divider); + } else { + /* *fb_div = floor(feedback_divider + 0.5); */ +- feedback_divider.full += rfixed_const_half(0); +- feedback_divider.full = rfixed_floor(feedback_divider); ++ feedback_divider.full += dfixed_const_half(0); ++ feedback_divider.full = dfixed_floor(feedback_divider); + +- *fb_div = rfixed_trunc(feedback_divider); ++ *fb_div = dfixed_trunc(feedback_divider); + *fb_div_frac = 0; + } + +@@ -693,10 +692,10 @@ calc_fb_ref_div(struct radeon_pll *pll, + pll_out_max = pll->pll_out_max; + } + +- ffreq.full = rfixed_const(freq); ++ ffreq.full = dfixed_const(freq); + /* max_error = ffreq * 0.0025; */ +- a.full = rfixed_const(400); +- max_error.full = rfixed_div(ffreq, a); ++ a.full = 
dfixed_const(400); ++ max_error.full = dfixed_div(ffreq, a); + + for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { + if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { +@@ -707,9 +706,9 @@ calc_fb_ref_div(struct radeon_pll *pll, + continue; + + /* pll_out = vco / post_div; */ +- a.full = rfixed_const(post_div); +- pll_out.full = rfixed_const(vco); +- pll_out.full = rfixed_div(pll_out, a); ++ a.full = dfixed_const(post_div); ++ pll_out.full = dfixed_const(vco); ++ pll_out.full = dfixed_div(pll_out, a); + + if (pll_out.full >= ffreq.full) { + error.full = pll_out.full - ffreq.full; +@@ -831,10 +830,6 @@ void radeon_compute_pll(struct radeon_pll *pll, + static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) + { + struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); +- struct drm_device *dev = fb->dev; +- +- if (fb->fbdev) +- radeonfb_remove(dev, fb); + + if (radeon_fb->obj) + drm_gem_object_unreference_unlocked(radeon_fb->obj); +@@ -856,21 +851,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = { + .create_handle = radeon_user_framebuffer_create_handle, + }; + +-struct drm_framebuffer * +-radeon_framebuffer_create(struct drm_device *dev, +- struct drm_mode_fb_cmd *mode_cmd, +- struct drm_gem_object *obj) ++void ++radeon_framebuffer_init(struct drm_device *dev, ++ struct radeon_framebuffer *rfb, ++ struct drm_mode_fb_cmd *mode_cmd, ++ struct drm_gem_object *obj) + { +- struct radeon_framebuffer *radeon_fb; +- +- radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); +- if (radeon_fb == NULL) { +- return NULL; +- } +- drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs); +- drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd); +- radeon_fb->obj = obj; +- return &radeon_fb->base; ++ rfb->obj = obj; ++ drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs); ++ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd); + } + + static struct drm_framebuffer * +@@ 
-879,6 +868,7 @@ radeon_user_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd *mode_cmd) + { + struct drm_gem_object *obj; ++ struct radeon_framebuffer *radeon_fb; + + obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); + if (obj == NULL) { +@@ -886,12 +876,26 @@ radeon_user_framebuffer_create(struct drm_device *dev, + "can't create framebuffer\n", mode_cmd->handle); + return NULL; + } +- return radeon_framebuffer_create(dev, mode_cmd, obj); ++ ++ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); ++ if (radeon_fb == NULL) { ++ return NULL; ++ } ++ ++ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); ++ ++ return &radeon_fb->base; ++} ++ ++static void radeon_output_poll_changed(struct drm_device *dev) ++{ ++ struct radeon_device *rdev = dev->dev_private; ++ radeon_fb_output_poll_changed(rdev); + } + + static const struct drm_mode_config_funcs radeon_mode_funcs = { + .fb_create = radeon_user_framebuffer_create, +- .fb_changed = radeonfb_probe, ++ .output_poll_changed = radeon_output_poll_changed + }; + + struct drm_prop_enum_list { +@@ -978,8 +982,11 @@ void radeon_update_display_priority(struct radeon_device *rdev) + /* set display priority to high for r3xx, rv515 chips + * this avoids flickering due to underflow to the + * display controllers during heavy acceleration. ++ * Don't force high on rs4xx igp chips as it seems to ++ * affect the sound card. See kernel bug 15982. 
+ */ +- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) ++ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && ++ !(rdev->flags & RADEON_IS_IGP)) + rdev->disp_priority = 2; + else + rdev->disp_priority = 0; +@@ -1031,15 +1038,27 @@ int radeon_modeset_init(struct radeon_device *rdev) + } + /* initialize hpd */ + radeon_hpd_init(rdev); +- drm_helper_initial_config(rdev->ddev); ++ ++ /* Initialize power management */ ++ if (radeon_pm) ++ radeon_pm_init(rdev); ++ ++ radeon_fbdev_init(rdev); ++ drm_kms_helper_poll_init(rdev->ddev); ++ + return 0; + } + + void radeon_modeset_fini(struct radeon_device *rdev) + { ++ radeon_fbdev_fini(rdev); + kfree(rdev->mode_info.bios_hardcoded_edid); + ++ if (radeon_pm) ++ radeon_pm_fini(rdev); ++ + if (rdev->mode_info.mode_config_initialized) { ++ drm_kms_helper_poll_fini(rdev->ddev); + radeon_hpd_fini(rdev); + drm_mode_config_cleanup(rdev->ddev); + rdev->mode_info.mode_config_initialized = false; +@@ -1089,15 +1108,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, + } + if (radeon_crtc->rmx_type != RMX_OFF) { + fixed20_12 a, b; +- a.full = rfixed_const(crtc->mode.vdisplay); +- b.full = rfixed_const(radeon_crtc->native_mode.hdisplay); +- radeon_crtc->vsc.full = rfixed_div(a, b); +- a.full = rfixed_const(crtc->mode.hdisplay); +- b.full = rfixed_const(radeon_crtc->native_mode.vdisplay); +- radeon_crtc->hsc.full = rfixed_div(a, b); ++ a.full = dfixed_const(crtc->mode.vdisplay); ++ b.full = dfixed_const(radeon_crtc->native_mode.hdisplay); ++ radeon_crtc->vsc.full = dfixed_div(a, b); ++ a.full = dfixed_const(crtc->mode.hdisplay); ++ b.full = dfixed_const(radeon_crtc->native_mode.vdisplay); ++ radeon_crtc->hsc.full = dfixed_div(a, b); + } else { +- radeon_crtc->vsc.full = rfixed_const(1); +- radeon_crtc->hsc.full = rfixed_const(1); ++ radeon_crtc->vsc.full = dfixed_const(1); ++ radeon_crtc->hsc.full = dfixed_const(1); + } + return true; + } +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c 
b/drivers/gpu/drm/radeon/radeon_drv.c +index b3749d4..7ed94d2 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.c ++++ b/drivers/gpu/drm/radeon/radeon_drv.c +@@ -44,9 +44,11 @@ + * - 2.1.0 - add square tiling interface + * - 2.2.0 - add r6xx/r7xx const buffer support + * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs ++ * - 2.4.0 - add crtc id query ++ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen + */ + #define KMS_DRIVER_MAJOR 2 +-#define KMS_DRIVER_MINOR 3 ++#define KMS_DRIVER_MINOR 5 + #define KMS_DRIVER_PATCHLEVEL 0 + int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); + int radeon_driver_unload_kms(struct drm_device *dev); +@@ -91,10 +93,10 @@ int radeon_testing = 0; + int radeon_connector_table = 0; + int radeon_tv = 1; + int radeon_new_pll = -1; +-int radeon_dynpm = -1; + int radeon_audio = 1; + int radeon_disp_priority = 0; + int radeon_hw_i2c = 0; ++int radeon_pm = 0; + + MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); + module_param_named(no_wb, radeon_no_wb, int, 0444); +@@ -132,9 +134,6 @@ module_param_named(tv, radeon_tv, int, 0444); + MODULE_PARM_DESC(new_pll, "Select new PLL code"); + module_param_named(new_pll, radeon_new_pll, int, 0444); + +-MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)"); +-module_param_named(dynpm, radeon_dynpm, int, 0444); +- + MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); + module_param_named(audio, radeon_audio, int, 0444); + +@@ -144,6 +143,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444); + MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); + module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); + ++MODULE_PARM_DESC(pm, "enable power management (0 = disable)"); ++module_param_named(pm, radeon_pm, int, 0444); ++ + static int radeon_suspend(struct drm_device *dev, pm_message_t state) + { + drm_radeon_private_t *dev_priv = dev->dev_private; +diff --git 
a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c +index c5ddaf5..1ebb100 100644 +--- a/drivers/gpu/drm/radeon/radeon_encoders.c ++++ b/drivers/gpu/drm/radeon/radeon_encoders.c +@@ -309,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + +- /* adjust pm to upcoming mode change */ +- radeon_pm_compute_clocks(rdev); +- + /* set the active encoder to connector routing */ + radeon_encoder_set_active_device(encoder); + drm_mode_set_crtcinfo(adjusted_mode, 0); +@@ -1111,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) + } + radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + +- /* adjust pm to dpms change */ +- radeon_pm_compute_clocks(rdev); + } + + union crtc_source_param { +@@ -1546,10 +1541,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder) + + static void radeon_atom_encoder_disable(struct drm_encoder *encoder) + { ++ struct drm_device *dev = encoder->dev; ++ struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig; + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + ++ switch (radeon_encoder->encoder_id) { ++ case ENCODER_OBJECT_ID_INTERNAL_TMDS1: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: ++ case ENCODER_OBJECT_ID_INTERNAL_LVDS: ++ case ENCODER_OBJECT_ID_INTERNAL_LVTM1: ++ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: ++ if (ASIC_IS_DCE4(rdev)) ++ /* disable the transmitter */ ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); ++ else { ++ /* disable the encoder and transmitter 
*/ ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); ++ atombios_dig_encoder_setup(encoder, ATOM_DISABLE); ++ } ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_DDI: ++ atombios_ddia_setup(encoder, ATOM_DISABLE); ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_DVO1: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: ++ atombios_external_tmds_setup(encoder, ATOM_DISABLE); ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_DAC1: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: ++ case ENCODER_OBJECT_ID_INTERNAL_DAC2: ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: ++ atombios_dac_setup(encoder, ATOM_DISABLE); ++ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) ++ atombios_tv_setup(encoder, ATOM_DISABLE); ++ break; ++ } ++ + if (radeon_encoder_is_digital(encoder)) { + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) + r600_hdmi_disable(encoder); +diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c +index 9ac57a0..dc1634b 100644 +--- a/drivers/gpu/drm/radeon/radeon_fb.c ++++ b/drivers/gpu/drm/radeon/radeon_fb.c +@@ -23,10 +23,6 @@ + * Authors: + * David Airlie + */ +- /* +- * Modularization +- */ +- + #include + #include + #include +@@ -42,17 +38,21 @@ + + #include + +-struct radeon_fb_device { ++/* object hierarchy - ++ this contains a helper + a radeon fb ++ the helper contains a pointer to radeon framebuffer baseclass. 
++*/ ++struct radeon_fbdev { + struct drm_fb_helper helper; +- struct radeon_framebuffer *rfb; +- struct radeon_device *rdev; ++ struct radeon_framebuffer rfb; ++ struct list_head fbdev_list; ++ struct radeon_device *rdev; + }; + + static struct fb_ops radeonfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, +- .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, +@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = { + .fb_setcmap = drm_fb_helper_setcmap, + }; + +-/** +- * Currently it is assumed that the old framebuffer is reused. +- * +- * LOCKING +- * caller should hold the mode config lock. +- * +- */ +-int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc) +-{ +- struct fb_info *info; +- struct drm_framebuffer *fb; +- struct drm_display_mode *mode = crtc->desired_mode; +- +- fb = crtc->fb; +- if (fb == NULL) { +- return 1; +- } +- info = fb->fbdev; +- if (info == NULL) { +- return 1; +- } +- if (mode == NULL) { +- return 1; +- } +- info->var.xres = mode->hdisplay; +- info->var.right_margin = mode->hsync_start - mode->hdisplay; +- info->var.hsync_len = mode->hsync_end - mode->hsync_start; +- info->var.left_margin = mode->htotal - mode->hsync_end; +- info->var.yres = mode->vdisplay; +- info->var.lower_margin = mode->vsync_start - mode->vdisplay; +- info->var.vsync_len = mode->vsync_end - mode->vsync_start; +- info->var.upper_margin = mode->vtotal - mode->vsync_end; +- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; +- /* avoid overflow */ +- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; +- +- return 0; +-} +-EXPORT_SYMBOL(radeonfb_resize); + + static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) + { +@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo + return aligned; + 
} + +-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { +- .gamma_set = radeon_crtc_fb_gamma_set, +- .gamma_get = radeon_crtc_fb_gamma_get, +-}; ++static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) ++{ ++ struct radeon_bo *rbo = gobj->driver_private; ++ int ret; ++ ++ ret = radeon_bo_reserve(rbo, false); ++ if (likely(ret == 0)) { ++ radeon_bo_kunmap(rbo); ++ radeon_bo_unreserve(rbo); ++ } ++ drm_gem_object_unreference_unlocked(gobj); ++} + +-int radeonfb_create(struct drm_device *dev, +- uint32_t fb_width, uint32_t fb_height, +- uint32_t surface_width, uint32_t surface_height, +- uint32_t surface_depth, uint32_t surface_bpp, +- struct drm_framebuffer **fb_p) ++static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, ++ struct drm_mode_fb_cmd *mode_cmd, ++ struct drm_gem_object **gobj_p) + { +- struct radeon_device *rdev = dev->dev_private; +- struct fb_info *info; +- struct radeon_fb_device *rfbdev; +- struct drm_framebuffer *fb = NULL; +- struct radeon_framebuffer *rfb; +- struct drm_mode_fb_cmd mode_cmd; ++ struct radeon_device *rdev = rfbdev->rdev; + struct drm_gem_object *gobj = NULL; + struct radeon_bo *rbo = NULL; +- struct device *device = &rdev->pdev->dev; +- int size, aligned_size, ret; +- u64 fb_gpuaddr; +- void *fbptr = NULL; +- unsigned long tmp; + bool fb_tiled = false; /* useful for testing */ + u32 tiling_flags = 0; ++ int ret; ++ int aligned_size, size; + +- mode_cmd.width = surface_width; +- mode_cmd.height = surface_height; +- +- /* avivo can't scanout real 24bpp */ +- if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) +- surface_bpp = 32; +- +- mode_cmd.bpp = surface_bpp; + /* need to align pitch with crtc limits */ +- mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); +- mode_cmd.depth = surface_depth; ++ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); + +- size = mode_cmd.pitch * 
mode_cmd.height; ++ size = mode_cmd->pitch * mode_cmd->height; + aligned_size = ALIGN(size, PAGE_SIZE); +- + ret = radeon_gem_object_create(rdev, aligned_size, 0, +- RADEON_GEM_DOMAIN_VRAM, +- false, ttm_bo_type_kernel, +- &gobj); ++ RADEON_GEM_DOMAIN_VRAM, ++ false, ttm_bo_type_kernel, ++ &gobj); + if (ret) { +- printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", +- surface_width, surface_height); +- ret = -ENOMEM; +- goto out; ++ printk(KERN_ERR "failed to allocate framebuffer (%d)\n", ++ aligned_size); ++ return -ENOMEM; + } + rbo = gobj->driver_private; + +@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev, + tiling_flags = RADEON_TILING_MACRO; + + #ifdef __BIG_ENDIAN +- switch (mode_cmd.bpp) { ++ switch (mode_cmd->bpp) { + case 32: + tiling_flags |= RADEON_TILING_SWAP_32BIT; + break; +@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev, + + if (tiling_flags) { + ret = radeon_bo_set_tiling_flags(rbo, +- tiling_flags | RADEON_TILING_SURFACE, +- mode_cmd.pitch); ++ tiling_flags | RADEON_TILING_SURFACE, ++ mode_cmd->pitch); + if (ret) + dev_err(rdev->dev, "FB failed to set tiling flags\n"); + } +- mutex_lock(&rdev->ddev->struct_mutex); +- fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); +- if (fb == NULL) { +- DRM_ERROR("failed to allocate fb.\n"); +- ret = -ENOMEM; +- goto out_unref; +- } ++ ++ + ret = radeon_bo_reserve(rbo, false); + if (unlikely(ret != 0)) + goto out_unref; +- ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); ++ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL); + if (ret) { + radeon_bo_unreserve(rbo); + goto out_unref; + } + if (fb_tiled) + radeon_bo_check_tiling(rbo, 0, 0); +- ret = radeon_bo_kmap(rbo, &fbptr); ++ ret = radeon_bo_kmap(rbo, NULL); + radeon_bo_unreserve(rbo); + if (ret) { + goto out_unref; + } + +- list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); ++ *gobj_p = gobj; ++ return 0; ++out_unref: ++ radeonfb_destroy_pinned_object(gobj); ++ *gobj_p = 
NULL; ++ return ret; ++} + +- *fb_p = fb; +- rfb = to_radeon_framebuffer(fb); +- rdev->fbdev_rfb = rfb; +- rdev->fbdev_rbo = rbo; ++static int radeonfb_create(struct radeon_fbdev *rfbdev, ++ struct drm_fb_helper_surface_size *sizes) ++{ ++ struct radeon_device *rdev = rfbdev->rdev; ++ struct fb_info *info; ++ struct drm_framebuffer *fb = NULL; ++ struct drm_mode_fb_cmd mode_cmd; ++ struct drm_gem_object *gobj = NULL; ++ struct radeon_bo *rbo = NULL; ++ struct device *device = &rdev->pdev->dev; ++ int ret; ++ unsigned long tmp; ++ ++ mode_cmd.width = sizes->surface_width; ++ mode_cmd.height = sizes->surface_height; ++ ++ /* avivo can't scanout real 24bpp */ ++ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) ++ sizes->surface_bpp = 32; + +- info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); ++ mode_cmd.bpp = sizes->surface_bpp; ++ mode_cmd.depth = sizes->surface_depth; ++ ++ ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); ++ rbo = gobj->driver_private; ++ ++ /* okay we have an object now allocate the framebuffer */ ++ info = framebuffer_alloc(0, device); + if (info == NULL) { + ret = -ENOMEM; + goto out_unref; + } + +- rdev->fbdev_info = info; +- rfbdev = info->par; +- rfbdev->helper.funcs = &radeon_fb_helper_funcs; +- rfbdev->helper.dev = dev; +- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc, +- RADEONFB_CONN_LIMIT); +- if (ret) +- goto out_unref; ++ info->par = rfbdev; ++ ++ radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); ++ ++ fb = &rfbdev->rfb.base; ++ ++ /* setup helper */ ++ rfbdev->helper.fb = fb; ++ rfbdev->helper.fbdev = info; + +- memset_io(fbptr, 0x0, aligned_size); ++ memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); + + strcpy(info->fix.id, "radeondrmfb"); + +@@ -255,17 +227,22 @@ int radeonfb_create(struct drm_device *dev, + info->flags = FBINFO_DEFAULT; + info->fbops = &radeonfb_ops; + +- tmp = fb_gpuaddr - rdev->mc.vram_start; ++ tmp = radeon_bo_gpu_offset(rbo) - 
rdev->mc.vram_start; + info->fix.smem_start = rdev->mc.aper_base + tmp; +- info->fix.smem_len = size; +- info->screen_base = fbptr; +- info->screen_size = size; ++ info->fix.smem_len = radeon_bo_size(rbo); ++ info->screen_base = rbo->kptr; ++ info->screen_size = radeon_bo_size(rbo); + +- drm_fb_helper_fill_var(info, fb, fb_width, fb_height); ++ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); + + /* setup aperture base/size for vesafb takeover */ +- info->aperture_base = rdev->ddev->mode_config.fb_base; +- info->aperture_size = rdev->mc.real_vram_size; ++ info->apertures = alloc_apertures(1); ++ if (!info->apertures) { ++ ret = -ENOMEM; ++ goto out_unref; ++ } ++ info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; ++ info->apertures->ranges[0].size = rdev->mc.real_vram_size; + + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; +@@ -274,44 +251,55 @@ int radeonfb_create(struct drm_device *dev, + info->pixmap.access_align = 32; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; ++ + if (info->screen_base == NULL) { + ret = -ENOSPC; + goto out_unref; + } ++ ++ ret = fb_alloc_cmap(&info->cmap, 256, 0); ++ if (ret) { ++ ret = -ENOMEM; ++ goto out_unref; ++ } ++ + DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); + DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); +- DRM_INFO("size %lu\n", (unsigned long)size); ++ DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); + DRM_INFO("fb depth is %d\n", fb->depth); + DRM_INFO(" pitch is %d\n", fb->pitch); + +- fb->fbdev = info; +- rfbdev->rfb = rfb; +- rfbdev->rdev = rdev; +- +- mutex_unlock(&rdev->ddev->struct_mutex); + vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); + return 0; + + out_unref: + if (rbo) { +- ret = radeon_bo_reserve(rbo, false); +- if (likely(ret == 0)) { +- radeon_bo_kunmap(rbo); +- radeon_bo_unreserve(rbo); +- } ++ + } + if (fb && ret) { +- list_del(&fb->filp_head); + 
drm_gem_object_unreference(gobj); + drm_framebuffer_cleanup(fb); + kfree(fb); + } +- drm_gem_object_unreference(gobj); +- mutex_unlock(&rdev->ddev->struct_mutex); +-out: + return ret; + } + ++static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper, ++ struct drm_fb_helper_surface_size *sizes) ++{ ++ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; ++ int new_fb = 0; ++ int ret; ++ ++ if (!helper->fb) { ++ ret = radeonfb_create(rfbdev, sizes); ++ if (ret) ++ return ret; ++ new_fb = 1; ++ } ++ return new_fb; ++} ++ + static char *mode_option; + int radeon_parse_options(char *options) + { +@@ -328,46 +316,108 @@ int radeon_parse_options(char *options) + return 0; + } + +-int radeonfb_probe(struct drm_device *dev) ++void radeon_fb_output_poll_changed(struct radeon_device *rdev) + { +- struct radeon_device *rdev = dev->dev_private; +- int bpp_sel = 32; +- +- /* select 8 bpp console on RN50 or 16MB cards */ +- if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) +- bpp_sel = 8; +- +- return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create); ++ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); + } + +-int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) ++static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) + { + struct fb_info *info; +- struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); ++ struct radeon_framebuffer *rfb = &rfbdev->rfb; + struct radeon_bo *rbo; + int r; + +- if (!fb) { +- return -EINVAL; ++ if (rfbdev->helper.fbdev) { ++ info = rfbdev->helper.fbdev; ++ ++ unregister_framebuffer(info); ++ if (info->cmap.len) ++ fb_dealloc_cmap(&info->cmap); ++ framebuffer_release(info); + } +- info = fb->fbdev; +- if (info) { +- struct radeon_fb_device *rfbdev = info->par; ++ ++ if (rfb->obj) { + rbo = rfb->obj->driver_private; +- unregister_framebuffer(info); + r = radeon_bo_reserve(rbo, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rbo); + 
radeon_bo_unpin(rbo); + radeon_bo_unreserve(rbo); + } +- drm_fb_helper_free(&rfbdev->helper); +- framebuffer_release(info); ++ drm_gem_object_unreference_unlocked(rfb->obj); + } ++ drm_fb_helper_fini(&rfbdev->helper); ++ drm_framebuffer_cleanup(&rfb->base); + +- printk(KERN_INFO "unregistered panic notifier\n"); ++ return 0; ++} + ++static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { ++ .gamma_set = radeon_crtc_fb_gamma_set, ++ .gamma_get = radeon_crtc_fb_gamma_get, ++ .fb_probe = radeon_fb_find_or_create_single, ++}; ++ ++int radeon_fbdev_init(struct radeon_device *rdev) ++{ ++ struct radeon_fbdev *rfbdev; ++ int bpp_sel = 32; ++ int ret; ++ ++ /* select 8 bpp console on RN50 or 16MB cards */ ++ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) ++ bpp_sel = 8; ++ ++ rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL); ++ if (!rfbdev) ++ return -ENOMEM; ++ ++ rfbdev->rdev = rdev; ++ rdev->mode_info.rfbdev = rfbdev; ++ rfbdev->helper.funcs = &radeon_fb_helper_funcs; ++ ++ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, ++ rdev->num_crtc, ++ RADEONFB_CONN_LIMIT); ++ if (ret) { ++ kfree(rfbdev); ++ return ret; ++ } ++ ++ drm_fb_helper_single_add_all_connectors(&rfbdev->helper); ++ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); + return 0; + } +-EXPORT_SYMBOL(radeonfb_remove); +-MODULE_LICENSE("GPL"); ++ ++void radeon_fbdev_fini(struct radeon_device *rdev) ++{ ++ if (!rdev->mode_info.rfbdev) ++ return; ++ ++ radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); ++ kfree(rdev->mode_info.rfbdev); ++ rdev->mode_info.rfbdev = NULL; ++} ++ ++void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) ++{ ++ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); ++} ++ ++int radeon_fbdev_total_size(struct radeon_device *rdev) ++{ ++ struct radeon_bo *robj; ++ int size = 0; ++ ++ robj = rdev->mode_info.rfbdev->rfb.obj->driver_private; ++ size += radeon_bo_size(robj); ++ return size; ++} ++ ++bool 
radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) ++{ ++ if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private) ++ return true; ++ return false; ++} +diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c +index d90f95b..b1f9a81 100644 +--- a/drivers/gpu/drm/radeon/radeon_fence.c ++++ b/drivers/gpu/drm/radeon/radeon_fence.c +@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) + radeon_fence_ring_emit(rdev, fence); + + fence->emited = true; +- fence->timeout = jiffies + ((2000 * HZ) / 1000); + list_del(&fence->list); + list_add_tail(&fence->list, &rdev->fence_drv.emited); + write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); +@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) + struct list_head *i, *n; + uint32_t seq; + bool wake = false; ++ unsigned long cjiffies; + +- if (rdev == NULL) { +- return true; +- } +- if (rdev->shutdown) { +- return true; +- } + seq = RREG32(rdev->fence_drv.scratch_reg); +- rdev->fence_drv.last_seq = seq; ++ if (seq != rdev->fence_drv.last_seq) { ++ rdev->fence_drv.last_seq = seq; ++ rdev->fence_drv.last_jiffies = jiffies; ++ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT; ++ } else { ++ cjiffies = jiffies; ++ if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) { ++ cjiffies -= rdev->fence_drv.last_jiffies; ++ if (time_after(rdev->fence_drv.last_timeout, cjiffies)) { ++ /* update the timeout */ ++ rdev->fence_drv.last_timeout -= cjiffies; ++ } else { ++ /* the 500ms timeout is elapsed we should test ++ * for GPU lockup ++ */ ++ rdev->fence_drv.last_timeout = 1; ++ } ++ } else { ++ /* wrap around update last jiffies, we will just wait ++ * a little longer ++ */ ++ rdev->fence_drv.last_jiffies = cjiffies; ++ } ++ return false; ++ } + n = NULL; + list_for_each(i, &rdev->fence_drv.emited) { + fence = list_entry(i, struct radeon_fence, list); +@@ -171,9 +189,8 @@ bool 
radeon_fence_signaled(struct radeon_fence *fence) + int radeon_fence_wait(struct radeon_fence *fence, bool intr) + { + struct radeon_device *rdev; +- unsigned long cur_jiffies; +- unsigned long timeout; +- bool expired = false; ++ unsigned long irq_flags, timeout; ++ u32 seq; + int r; + + if (fence == NULL) { +@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) + if (radeon_fence_signaled(fence)) { + return 0; + } +- ++ timeout = rdev->fence_drv.last_timeout; + retry: +- cur_jiffies = jiffies; +- timeout = HZ / 100; +- if (time_after(fence->timeout, cur_jiffies)) { +- timeout = fence->timeout - cur_jiffies; +- } +- ++ /* save current sequence used to check for GPU lockup */ ++ seq = rdev->fence_drv.last_seq; + if (intr) { + radeon_irq_kms_sw_irq_get(rdev); + r = wait_event_interruptible_timeout(rdev->fence_drv.queue, + radeon_fence_signaled(fence), timeout); + radeon_irq_kms_sw_irq_put(rdev); +- if (unlikely(r < 0)) ++ if (unlikely(r < 0)) { + return r; ++ } + } else { + radeon_irq_kms_sw_irq_get(rdev); + r = wait_event_timeout(rdev->fence_drv.queue, +@@ -206,38 +220,36 @@ retry: + radeon_irq_kms_sw_irq_put(rdev); + } + if (unlikely(!radeon_fence_signaled(fence))) { +- if (unlikely(r == 0)) { +- expired = true; ++ /* we were interrupted for some reason and fence isn't ++ * isn't signaled yet, resume wait ++ */ ++ if (r) { ++ timeout = r; ++ goto retry; + } +- if (unlikely(expired)) { +- timeout = 1; +- if (time_after(cur_jiffies, fence->timeout)) { +- timeout = cur_jiffies - fence->timeout; +- } +- timeout = jiffies_to_msecs(timeout); +- if (timeout > 500) { +- DRM_ERROR("fence(%p:0x%08X) %lums timeout " +- "going to reset GPU\n", +- fence, fence->seq, timeout); +- radeon_gpu_reset(rdev); +- WREG32(rdev->fence_drv.scratch_reg, fence->seq); +- } ++ /* don't protect read access to rdev->fence_drv.last_seq ++ * if we experiencing a lockup the value doesn't change ++ */ ++ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) 
{ ++ /* good news we believe it's a lockup */ ++ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); ++ /* FIXME: what should we do ? marking everyone ++ * as signaled for now ++ */ ++ rdev->gpu_lockup = true; ++ r = radeon_gpu_reset(rdev); ++ if (r) ++ return r; ++ WREG32(rdev->fence_drv.scratch_reg, fence->seq); ++ rdev->gpu_lockup = false; + } ++ timeout = RADEON_FENCE_JIFFIES_TIMEOUT; ++ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); ++ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT; ++ rdev->fence_drv.last_jiffies = jiffies; ++ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); + goto retry; + } +- if (unlikely(expired)) { +- rdev->fence_drv.count_timeout++; +- cur_jiffies = jiffies; +- timeout = 1; +- if (time_after(cur_jiffies, fence->timeout)) { +- timeout = cur_jiffies - fence->timeout; +- } +- timeout = jiffies_to_msecs(timeout); +- DRM_ERROR("fence(%p:0x%08X) %lums timeout\n", +- fence, fence->seq, timeout); +- DRM_ERROR("last signaled fence(0x%08X)\n", +- rdev->fence_drv.last_seq); +- } + return 0; + } + +@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev) + INIT_LIST_HEAD(&rdev->fence_drv.created); + INIT_LIST_HEAD(&rdev->fence_drv.emited); + INIT_LIST_HEAD(&rdev->fence_drv.signaled); +- rdev->fence_drv.count_timeout = 0; + init_waitqueue_head(&rdev->fence_drv.queue); + rdev->fence_drv.initialized = true; + write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); +diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h +deleted file mode 100644 +index 3d4d84e..0000000 +--- a/drivers/gpu/drm/radeon/radeon_fixed.h ++++ /dev/null +@@ -1,67 +0,0 @@ +-/* +- * Copyright 2009 Red Hat Inc. 
+- * +- * Permission is hereby granted, free of charge, to any person obtaining a +- * copy of this software and associated documentation files (the "Software"), +- * to deal in the Software without restriction, including without limitation +- * the rights to use, copy, modify, merge, publish, distribute, sublicense, +- * and/or sell copies of the Software, and to permit persons to whom the +- * Software is furnished to do so, subject to the following conditions: +- * +- * The above copyright notice and this permission notice shall be included in +- * all copies or substantial portions of the Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +- * OTHER DEALINGS IN THE SOFTWARE. 
+- * +- * Authors: Dave Airlie +- */ +-#ifndef RADEON_FIXED_H +-#define RADEON_FIXED_H +- +-typedef union rfixed { +- u32 full; +-} fixed20_12; +- +- +-#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ +-#define rfixed_const_half(A) (u32)(((A) << 12) + 2048) +-#define rfixed_const_666(A) (u32)(((A) << 12) + 2731) +-#define rfixed_const_8(A) (u32)(((A) << 12) + 3277) +-#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) +-#define fixed_init(A) { .full = rfixed_const((A)) } +-#define fixed_init_half(A) { .full = rfixed_const_half((A)) } +-#define rfixed_trunc(A) ((A).full >> 12) +- +-static inline u32 rfixed_floor(fixed20_12 A) +-{ +- u32 non_frac = rfixed_trunc(A); +- +- return rfixed_const(non_frac); +-} +- +-static inline u32 rfixed_ceil(fixed20_12 A) +-{ +- u32 non_frac = rfixed_trunc(A); +- +- if (A.full > rfixed_const(non_frac)) +- return rfixed_const(non_frac + 1); +- else +- return rfixed_const(non_frac); +-} +- +-static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) +-{ +- u64 tmp = ((u64)A.full << 13); +- +- do_div(tmp, B.full); +- tmp += 1; +- tmp /= 2; +- return lower_32_bits(tmp); +-} +-#endif +diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c +index 1770d3c..e65b903 100644 +--- a/drivers/gpu/drm/radeon/radeon_gart.c ++++ b/drivers/gpu/drm/radeon/radeon_gart.c +@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, + int i, j; + + if (!rdev->gart.ready) { +- DRM_ERROR("trying to bind memory to unitialized GART !\n"); ++ WARN(1, "trying to bind memory to unitialized GART !\n"); + return -EINVAL; + } + t = offset / RADEON_GPU_PAGE_SIZE; +diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c +index ef92d14..a72a3ee 100644 +--- a/drivers/gpu/drm/radeon/radeon_gem.c ++++ b/drivers/gpu/drm/radeon/radeon_gem.c +@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) + if (robj) { + 
radeon_bo_unref(&robj); + } ++ ++ drm_gem_object_release(gobj); ++ kfree(gobj); + } + + int radeon_gem_object_create(struct radeon_device *rdev, int size, +@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, + args->vram_visible = rdev->mc.real_vram_size; + if (rdev->stollen_vga_memory) + args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); +- if (rdev->fbdev_rbo) +- args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo); ++ args->vram_visible -= radeon_fbdev_total_size(rdev); + args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - + RADEON_IB_POOL_SIZE*64*1024; + return 0; +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c +index a212041..059bfa4 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c +@@ -26,6 +26,7 @@ + * Jerome Glisse + */ + #include "drmP.h" ++#include "drm_crtc_helper.h" + #include "radeon_drm.h" + #include "radeon_reg.h" + #include "radeon.h" +@@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work) + radeon_connector_hotplug(connector); + } + /* Just fire off a uevent and let userspace tell us what to do */ +- drm_sysfs_hotplug_event(dev); ++ drm_helper_hpd_irq_event(dev); + } + + void radeon_driver_irq_preinstall_kms(struct drm_device *dev) +@@ -67,6 +68,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) + + /* Disable *all* interrupts */ + rdev->irq.sw_int = false; ++ rdev->irq.gui_idle = false; + for (i = 0; i < rdev->num_crtc; i++) + rdev->irq.crtc_vblank_int[i] = false; + for (i = 0; i < 6; i++) +@@ -96,6 +98,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) + } + /* Disable *all* interrupts */ + rdev->irq.sw_int = false; ++ rdev->irq.gui_idle = false; + for (i = 0; i < rdev->num_crtc; i++) + rdev->irq.crtc_vblank_int[i] = false; + for (i = 0; i < 6; i++) +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c 
+index c633319..6a70c0d 100644 +--- a/drivers/gpu/drm/radeon/radeon_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_kms.c +@@ -98,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) + { + struct radeon_device *rdev = dev->dev_private; + struct drm_radeon_info *info; ++ struct radeon_mode_info *minfo = &rdev->mode_info; + uint32_t *value_ptr; + uint32_t value; ++ struct drm_crtc *crtc; ++ int i, found; + + info = data; + value_ptr = (uint32_t *)((unsigned long)info->value); ++ value = *value_ptr; + switch (info->request) { + case RADEON_INFO_DEVICE_ID: + value = dev->pci_device; +@@ -114,6 +118,27 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) + value = rdev->num_z_pipes; + break; + case RADEON_INFO_ACCEL_WORKING: ++ /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ ++ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) ++ value = false; ++ else ++ value = rdev->accel_working; ++ break; ++ case RADEON_INFO_CRTC_FROM_ID: ++ for (i = 0, found = 0; i < rdev->num_crtc; i++) { ++ crtc = (struct drm_crtc *)minfo->crtcs[i]; ++ if (crtc && crtc->base.id == value) { ++ value = i; ++ found = 1; ++ break; ++ } ++ } ++ if (!found) { ++ DRM_DEBUG("unknown crtc id %d\n", value); ++ return -EINVAL; ++ } ++ break; ++ case RADEON_INFO_ACCEL_WORKING2: + value = rdev->accel_working; + break; + default: +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +index 88865e3..e1e5255 100644 +--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c ++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +@@ -26,7 +26,7 @@ + #include + #include + #include +-#include "radeon_fixed.h" ++#include + #include "radeon.h" + #include "atom.h" + +@@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) + + switch (mode) { + case DRM_MODE_DPMS_ON: ++ radeon_crtc->enabled = true; ++ /* adjust pm to dpms changes BEFORE enabling crtcs */ ++ 
radeon_pm_compute_clocks(rdev); + if (radeon_crtc->crtc_id) + WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); + else { +@@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) + RADEON_CRTC_DISP_REQ_EN_B)); + WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); + } ++ radeon_crtc->enabled = false; ++ /* adjust pm to dpms changes AFTER disabling crtcs */ ++ radeon_pm_compute_clocks(rdev); + break; + } + } +@@ -966,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) + { ++ struct drm_device *dev = crtc->dev; ++ struct radeon_device *rdev = dev->dev_private; ++ ++ /* adjust pm to upcoming mode change */ ++ radeon_pm_compute_clocks(rdev); ++ + if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; + return true; +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +index 0274abe..5b07b88 100644 +--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c ++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +@@ -116,8 +116,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) + else + radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); + +- /* adjust pm to dpms change */ +- radeon_pm_compute_clocks(rdev); + } + + static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) +@@ -217,11 +215,6 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) + { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); +- struct drm_device *dev = encoder->dev; +- struct radeon_device *rdev = dev->dev_private; +- +- /* adjust pm to upcoming mode change */ +- radeon_pm_compute_clocks(rdev); + + /* set the active encoder to connector routing */ + radeon_encoder_set_active_device(encoder); +@@ -286,8 +279,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode + else + radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + +- /* adjust pm to dpms change */ +- radeon_pm_compute_clocks(rdev); + } + + static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) +@@ -474,8 +465,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) + else + radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + +- /* adjust pm to dpms change */ +- radeon_pm_compute_clocks(rdev); + } + + static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) +@@ -642,8 +631,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) + else + radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + +- /* adjust pm to dpms change */ +- radeon_pm_compute_clocks(rdev); + } + + static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) +@@ -852,8 +839,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) + else + radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); + +- /* adjust pm to dpms change */ +- radeon_pm_compute_clocks(rdev); + } + + static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) +@@ -1183,6 +1168,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; + bool color = true; ++ struct drm_crtc *crtc; ++ ++ /* find out if crtc2 is in use or if this encoder is using it */ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ if ((radeon_crtc->crtc_id == 1) && crtc->enabled) { ++ if (encoder->crtc != crtc) { ++ return connector_status_disconnected; ++ } ++ } ++ } + + if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || + connector->connector_type == DRM_MODE_CONNECTOR_Composite || +diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h +index 5413fcd..67358ba 100644 +--- a/drivers/gpu/drm/radeon/radeon_mode.h ++++ b/drivers/gpu/drm/radeon/radeon_mode.h +@@ -34,11 +34,12 @@ + #include + #include + #include ++#include + #include + #include + #include +-#include "radeon_fixed.h" + ++struct radeon_bo; + struct radeon_device; + + #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) +@@ -65,6 +66,16 @@ enum radeon_tv_std { + TV_STD_PAL_N, + }; + ++enum radeon_hpd_id { ++ RADEON_HPD_1 = 0, ++ RADEON_HPD_2, ++ RADEON_HPD_3, ++ RADEON_HPD_4, ++ RADEON_HPD_5, ++ RADEON_HPD_6, ++ RADEON_HPD_NONE = 0xff, ++}; ++ + /* radeon gpio-based i2c + * 1. 
"mask" reg and bits + * grabs the gpio pins for software use +@@ -84,7 +95,7 @@ struct radeon_i2c_bus_rec { + /* id used by atom */ + uint8_t i2c_id; + /* id used by atom */ +- uint8_t hpd_id; ++ enum radeon_hpd_id hpd; + /* can be used with hw i2c engine */ + bool hw_capable; + /* uses multi-media i2c engine */ +@@ -202,6 +213,8 @@ enum radeon_dvo_chip { + DVO_SIL1178, + }; + ++struct radeon_fbdev; ++ + struct radeon_mode_info { + struct atom_context *atom_context; + struct card_info *atom_card_info; +@@ -218,6 +231,9 @@ struct radeon_mode_info { + struct drm_property *tmds_pll_property; + /* hardcoded DFP edid from BIOS */ + struct edid *bios_hardcoded_edid; ++ ++ /* pointer to fbdev info structure */ ++ struct radeon_fbdev *rfbdev; + }; + + #define MAX_H_CODE_TIMING_LEN 32 +@@ -339,6 +355,7 @@ struct radeon_encoder { + enum radeon_rmx_type rmx_type; + struct drm_display_mode native_mode; + void *enc_priv; ++ int audio_polling_active; + int hdmi_offset; + int hdmi_config_offset; + int hdmi_audio_workaround; +@@ -363,16 +380,6 @@ struct radeon_gpio_rec { + u32 mask; + }; + +-enum radeon_hpd_id { +- RADEON_HPD_NONE = 0, +- RADEON_HPD_1, +- RADEON_HPD_2, +- RADEON_HPD_3, +- RADEON_HPD_4, +- RADEON_HPD_5, +- RADEON_HPD_6, +-}; +- + struct radeon_hpd { + enum radeon_hpd_id hpd; + u8 plugged_state; +@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno); + extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno); +-struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, +- struct drm_mode_fb_cmd *mode_cmd, +- struct drm_gem_object *obj); +- +-int radeonfb_probe(struct drm_device *dev); ++void radeon_framebuffer_init(struct drm_device *dev, ++ struct radeon_framebuffer *rfb, ++ struct drm_mode_fb_cmd *mode_cmd, ++ struct drm_gem_object *obj); + + int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); + bool 
radeon_get_legacy_connector_info_from_bios(struct drm_device *dev); +@@ -575,4 +581,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder, + void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); ++ ++/* fbdev layer */ ++int radeon_fbdev_init(struct radeon_device *rdev); ++void radeon_fbdev_fini(struct radeon_device *rdev); ++void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); ++int radeon_fbdev_total_size(struct radeon_device *rdev); ++bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); ++ ++void radeon_fb_output_poll_changed(struct radeon_device *rdev); + #endif +diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c +index 1227747..d5b9373 100644 +--- a/drivers/gpu/drm/radeon/radeon_object.c ++++ b/drivers/gpu/drm/radeon/radeon_object.c +@@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, + + radeon_ttm_placement_from_domain(bo, domain); + /* Kernel allocation are uninterruptible */ ++ mutex_lock(&rdev->vram_mutex); + r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, + &bo->placement, 0, 0, !kernel, NULL, size, + &radeon_ttm_bo_destroy); ++ mutex_unlock(&rdev->vram_mutex); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + dev_err(rdev->dev, +@@ -166,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo) + void radeon_bo_unref(struct radeon_bo **bo) + { + struct ttm_buffer_object *tbo; ++ struct radeon_device *rdev; + + if ((*bo) == NULL) + return; ++ rdev = (*bo)->rdev; + tbo = &((*bo)->tbo); ++ mutex_lock(&rdev->vram_mutex); + ttm_bo_unref(&tbo); ++ mutex_unlock(&rdev->vram_mutex); + if (tbo == NULL) + *bo = NULL; + } +@@ -192,7 +198,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) + } + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; +- r = 
ttm_bo_validate(&bo->tbo, &bo->placement, false, false); ++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); + if (likely(r == 0)) { + bo->pin_count = 1; + if (gpu_addr != NULL) +@@ -216,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo) + return 0; + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; +- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); ++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); + if (unlikely(r != 0)) + dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); + return r; +@@ -295,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head) + r = radeon_bo_reserve(lobj->bo, false); + if (unlikely(r != 0)) + return r; ++ lobj->reserved = true; + } + return 0; + } +@@ -305,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head) + + list_for_each_entry(lobj, head, list) { + /* only unreserve object we successfully reserved */ +- if (radeon_bo_is_reserved(lobj->bo)) ++ if (lobj->reserved && radeon_bo_is_reserved(lobj->bo)) + radeon_bo_unreserve(lobj->bo); + } + } +@@ -316,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head) + struct radeon_bo *bo; + int r; + ++ list_for_each_entry(lobj, head, list) { ++ lobj->reserved = false; ++ } + r = radeon_bo_list_reserve(head); + if (unlikely(r != 0)) { + return r; +@@ -331,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head) + lobj->rdomain); + } + r = ttm_bo_validate(&bo->tbo, &bo->placement, +- true, false); ++ true, false, false); + if (unlikely(r)) + return r; + } +@@ -499,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo, + radeon_bo_check_tiling(rbo, 0, 1); + } + +-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) ++int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) + { ++ struct radeon_device *rdev; + struct radeon_bo *rbo; ++ unsigned long offset, size; ++ int r; ++ + if (!radeon_ttm_bo_is_radeon_bo(bo)) +- 
return; ++ return 0; + rbo = container_of(bo, struct radeon_bo, tbo); + radeon_bo_check_tiling(rbo, 0, 0); ++ rdev = rbo->rdev; ++ if (bo->mem.mem_type == TTM_PL_VRAM) { ++ size = bo->mem.num_pages << PAGE_SHIFT; ++ offset = bo->mem.mm_node->start << PAGE_SHIFT; ++ if ((offset + size) > rdev->mc.visible_vram_size) { ++ /* hurrah the memory is not visible ! */ ++ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); ++ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; ++ r = ttm_bo_validate(bo, &rbo->placement, false, true, false); ++ if (unlikely(r != 0)) ++ return r; ++ offset = bo->mem.mm_node->start << PAGE_SHIFT; ++ /* this should not happen */ ++ if ((offset + size) > rdev->mc.visible_vram_size) ++ return -EINVAL; ++ } ++ } ++ return 0; + } +diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h +index 7ab43de..353998d 100644 +--- a/drivers/gpu/drm/radeon/radeon_object.h ++++ b/drivers/gpu/drm/radeon/radeon_object.h +@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, + bool force_drop); + extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); +-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); ++extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); + extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); + #endif +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c +index a4b5749..63f679a 100644 +--- a/drivers/gpu/drm/radeon/radeon_pm.c ++++ b/drivers/gpu/drm/radeon/radeon_pm.c +@@ -23,25 +23,17 @@ + #include "drmP.h" + #include "radeon.h" + #include "avivod.h" ++#ifdef CONFIG_ACPI ++#include ++#endif ++#include + + #define RADEON_IDLE_LOOP_MS 100 + #define RADEON_RECLOCK_DELAY_MS 200 + #define RADEON_WAIT_VBLANK_TIMEOUT 200 ++#define RADEON_WAIT_IDLE_TIMEOUT 200 + +-static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool 
finish); +-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); +-static void radeon_pm_set_clocks(struct radeon_device *rdev); +-static void radeon_pm_idle_work_handler(struct work_struct *work); +-static int radeon_debugfs_pm_init(struct radeon_device *rdev); +- +-static const char *pm_state_names[4] = { +- "PM_STATE_DISABLED", +- "PM_STATE_MINIMUM", +- "PM_STATE_PAUSED", +- "PM_STATE_ACTIVE" +-}; +- +-static const char *pm_state_types[5] = { ++static const char *radeon_pm_state_type_name[5] = { + "Default", + "Powersave", + "Battery", +@@ -49,138 +41,109 @@ static const char *pm_state_types[5] = { + "Performance", + }; + +-static void radeon_print_power_mode_info(struct radeon_device *rdev) ++static void radeon_dynpm_idle_work_handler(struct work_struct *work); ++static int radeon_debugfs_pm_init(struct radeon_device *rdev); ++static bool radeon_pm_in_vbl(struct radeon_device *rdev); ++static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); ++static void radeon_pm_update_profile(struct radeon_device *rdev); ++static void radeon_pm_set_clocks(struct radeon_device *rdev); ++ ++#define ACPI_AC_CLASS "ac_adapter" ++ ++#ifdef CONFIG_ACPI ++static int radeon_acpi_event(struct notifier_block *nb, ++ unsigned long val, ++ void *data) + { +- int i, j; +- bool is_default; ++ struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); ++ struct acpi_bus_event *entry = (struct acpi_bus_event *)data; + +- DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); +- for (i = 0; i < rdev->pm.num_power_states; i++) { +- if (rdev->pm.default_power_state == &rdev->pm.power_state[i]) +- is_default = true; ++ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { ++ if (power_supply_is_system_supplied() > 0) ++ DRM_DEBUG("pm: AC\n"); + else +- is_default = false; +- DRM_INFO("State %d %s %s\n", i, +- pm_state_types[rdev->pm.power_state[i].type], +- is_default ? 
"(default)" : ""); +- if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) +- DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes); +- DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes); +- for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) { +- if (rdev->flags & RADEON_IS_IGP) +- DRM_INFO("\t\t%d engine: %d\n", +- j, +- rdev->pm.power_state[i].clock_info[j].sclk * 10); +- else +- DRM_INFO("\t\t%d engine/memory: %d/%d\n", +- j, +- rdev->pm.power_state[i].clock_info[j].sclk * 10, +- rdev->pm.power_state[i].clock_info[j].mclk * 10); ++ DRM_DEBUG("pm: DC\n"); ++ ++ if (rdev->pm.pm_method == PM_METHOD_PROFILE) { ++ if (rdev->pm.profile == PM_PROFILE_AUTO) { ++ mutex_lock(&rdev->pm.mutex); ++ radeon_pm_update_profile(rdev); ++ radeon_pm_set_clocks(rdev); ++ mutex_unlock(&rdev->pm.mutex); ++ } + } + } ++ ++ return NOTIFY_OK; + } ++#endif + +-static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev, +- enum radeon_pm_state_type type) ++static void radeon_pm_update_profile(struct radeon_device *rdev) + { +- int i, j; +- enum radeon_pm_state_type wanted_types[2]; +- int wanted_count; +- +- switch (type) { +- case POWER_STATE_TYPE_DEFAULT: +- default: +- return rdev->pm.default_power_state; +- case POWER_STATE_TYPE_POWERSAVE: +- if (rdev->flags & RADEON_IS_MOBILITY) { +- wanted_types[0] = POWER_STATE_TYPE_POWERSAVE; +- wanted_types[1] = POWER_STATE_TYPE_BATTERY; +- wanted_count = 2; +- } else { +- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; +- wanted_count = 1; +- } ++ switch (rdev->pm.profile) { ++ case PM_PROFILE_DEFAULT: ++ rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; + break; +- case POWER_STATE_TYPE_BATTERY: +- if (rdev->flags & RADEON_IS_MOBILITY) { +- wanted_types[0] = POWER_STATE_TYPE_BATTERY; +- wanted_types[1] = POWER_STATE_TYPE_POWERSAVE; +- wanted_count = 2; ++ case PM_PROFILE_AUTO: ++ if (power_supply_is_system_supplied() > 0) { ++ if 
(rdev->pm.active_crtc_count > 1) ++ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; ++ else ++ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; + } else { +- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; +- wanted_count = 1; ++ if (rdev->pm.active_crtc_count > 1) ++ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; ++ else ++ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; + } + break; +- case POWER_STATE_TYPE_BALANCED: +- case POWER_STATE_TYPE_PERFORMANCE: +- wanted_types[0] = type; +- wanted_count = 1; ++ case PM_PROFILE_LOW: ++ if (rdev->pm.active_crtc_count > 1) ++ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; ++ else ++ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; + break; +- } +- +- for (i = 0; i < wanted_count; i++) { +- for (j = 0; j < rdev->pm.num_power_states; j++) { +- if (rdev->pm.power_state[j].type == wanted_types[i]) +- return &rdev->pm.power_state[j]; +- } +- } +- +- return rdev->pm.default_power_state; +-} +- +-static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev, +- struct radeon_power_state *power_state, +- enum radeon_pm_clock_mode_type type) +-{ +- switch (type) { +- case POWER_MODE_TYPE_DEFAULT: +- default: +- return power_state->default_clock_mode; +- case POWER_MODE_TYPE_LOW: +- return &power_state->clock_info[0]; +- case POWER_MODE_TYPE_MID: +- if (power_state->num_clock_modes > 2) +- return &power_state->clock_info[1]; ++ case PM_PROFILE_MID: ++ if (rdev->pm.active_crtc_count > 1) ++ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; ++ else ++ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; ++ break; ++ case PM_PROFILE_HIGH: ++ if (rdev->pm.active_crtc_count > 1) ++ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; + else +- return &power_state->clock_info[0]; ++ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; + break; +- case POWER_MODE_TYPE_HIGH: +- return &power_state->clock_info[power_state->num_clock_modes - 1]; + } + ++ if (rdev->pm.active_crtc_count == 0) { ++ 
rdev->pm.requested_power_state_index = ++ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; ++ rdev->pm.requested_clock_mode_index = ++ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; ++ } else { ++ rdev->pm.requested_power_state_index = ++ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; ++ rdev->pm.requested_clock_mode_index = ++ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; ++ } + } + +-static void radeon_get_power_state(struct radeon_device *rdev, +- enum radeon_pm_action action) ++static void radeon_unmap_vram_bos(struct radeon_device *rdev) + { +- switch (action) { +- case PM_ACTION_MINIMUM: +- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY); +- rdev->pm.requested_clock_mode = +- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW); +- break; +- case PM_ACTION_DOWNCLOCK: +- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE); +- rdev->pm.requested_clock_mode = +- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID); +- break; +- case PM_ACTION_UPCLOCK: +- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT); +- rdev->pm.requested_clock_mode = +- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH); +- break; +- case PM_ACTION_NONE: +- default: +- DRM_ERROR("Requested mode for not defined action\n"); ++ struct radeon_bo *bo, *n; ++ ++ if (list_empty(&rdev->gem.objects)) + return; ++ ++ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { ++ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) ++ ttm_bo_unmap_virtual(&bo->tbo); + } +- DRM_INFO("Requested: e: %d m: %d p: %d\n", +- rdev->pm.requested_clock_mode->sclk, +- rdev->pm.requested_clock_mode->mclk, +- rdev->pm.requested_power_state->non_clock_info.pcie_lanes); + } + +-static inline void radeon_sync_with_vblank(struct radeon_device *rdev) ++static void 
radeon_sync_with_vblank(struct radeon_device *rdev) + { + if (rdev->pm.active_crtcs) { + rdev->pm.vblank_sync = false; +@@ -192,73 +155,359 @@ static inline void radeon_sync_with_vblank(struct radeon_device *rdev) + + static void radeon_set_power_state(struct radeon_device *rdev) + { +- /* if *_clock_mode are the same, *_power_state are as well */ +- if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode) +- return; ++ u32 sclk, mclk; ++ bool misc_after = false; + +- DRM_INFO("Setting: e: %d m: %d p: %d\n", +- rdev->pm.requested_clock_mode->sclk, +- rdev->pm.requested_clock_mode->mclk, +- rdev->pm.requested_power_state->non_clock_info.pcie_lanes); ++ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && ++ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) ++ return; + +- /* set pcie lanes */ +- /* TODO */ ++ if (radeon_gui_idle(rdev)) { ++ sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. ++ clock_info[rdev->pm.requested_clock_mode_index].sclk; ++ if (sclk > rdev->clock.default_sclk) ++ sclk = rdev->clock.default_sclk; + +- /* set voltage */ +- /* TODO */ ++ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
++ clock_info[rdev->pm.requested_clock_mode_index].mclk; ++ if (mclk > rdev->clock.default_mclk) ++ mclk = rdev->clock.default_mclk; + +- /* set engine clock */ +- radeon_sync_with_vblank(rdev); +- radeon_pm_debug_check_in_vbl(rdev, false); +- radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); +- radeon_pm_debug_check_in_vbl(rdev, true); ++ /* upvolt before raising clocks, downvolt after lowering clocks */ ++ if (sclk < rdev->pm.current_sclk) ++ misc_after = true; + +-#if 0 +- /* set memory clock */ +- if (rdev->asic->set_memory_clock) { + radeon_sync_with_vblank(rdev); +- radeon_pm_debug_check_in_vbl(rdev, false); +- radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk); +- radeon_pm_debug_check_in_vbl(rdev, true); ++ ++ if (rdev->pm.pm_method == PM_METHOD_DYNPM) { ++ if (!radeon_pm_in_vbl(rdev)) ++ return; ++ } ++ ++ radeon_pm_prepare(rdev); ++ ++ if (!misc_after) ++ /* voltage, pcie lanes, etc.*/ ++ radeon_pm_misc(rdev); ++ ++ /* set engine clock */ ++ if (sclk != rdev->pm.current_sclk) { ++ radeon_pm_debug_check_in_vbl(rdev, false); ++ radeon_set_engine_clock(rdev, sclk); ++ radeon_pm_debug_check_in_vbl(rdev, true); ++ rdev->pm.current_sclk = sclk; ++ DRM_DEBUG("Setting: e: %d\n", sclk); ++ } ++ ++ /* set memory clock */ ++ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { ++ radeon_pm_debug_check_in_vbl(rdev, false); ++ radeon_set_memory_clock(rdev, mclk); ++ radeon_pm_debug_check_in_vbl(rdev, true); ++ rdev->pm.current_mclk = mclk; ++ DRM_DEBUG("Setting: m: %d\n", mclk); ++ } ++ ++ if (misc_after) ++ /* voltage, pcie lanes, etc.*/ ++ radeon_pm_misc(rdev); ++ ++ radeon_pm_finish(rdev); ++ ++ rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; ++ rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; ++ } else ++ DRM_DEBUG("pm: GUI not idle!!!\n"); ++} ++ ++static void radeon_pm_set_clocks(struct radeon_device *rdev) ++{ ++ int i; ++ ++ 
mutex_lock(&rdev->ddev->struct_mutex); ++ mutex_lock(&rdev->vram_mutex); ++ mutex_lock(&rdev->cp.mutex); ++ ++ /* gui idle int has issues on older chips it seems */ ++ if (rdev->family >= CHIP_R600) { ++ if (rdev->irq.installed) { ++ /* wait for GPU idle */ ++ rdev->pm.gui_idle = false; ++ rdev->irq.gui_idle = true; ++ radeon_irq_set(rdev); ++ wait_event_interruptible_timeout( ++ rdev->irq.idle_queue, rdev->pm.gui_idle, ++ msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); ++ rdev->irq.gui_idle = false; ++ radeon_irq_set(rdev); ++ } ++ } else { ++ if (rdev->cp.ready) { ++ struct radeon_fence *fence; ++ radeon_ring_alloc(rdev, 64); ++ radeon_fence_create(rdev, &fence); ++ radeon_fence_emit(rdev, fence); ++ radeon_ring_commit(rdev); ++ radeon_fence_wait(fence, false); ++ radeon_fence_unref(&fence); ++ } + } +-#endif ++ radeon_unmap_vram_bos(rdev); ++ ++ if (rdev->irq.installed) { ++ for (i = 0; i < rdev->num_crtc; i++) { ++ if (rdev->pm.active_crtcs & (1 << i)) { ++ rdev->pm.req_vblank |= (1 << i); ++ drm_vblank_get(rdev->ddev, i); ++ } ++ } ++ } ++ ++ radeon_set_power_state(rdev); ++ ++ if (rdev->irq.installed) { ++ for (i = 0; i < rdev->num_crtc; i++) { ++ if (rdev->pm.req_vblank & (1 << i)) { ++ rdev->pm.req_vblank &= ~(1 << i); ++ drm_vblank_put(rdev->ddev, i); ++ } ++ } ++ } ++ ++ /* update display watermarks based on new power state */ ++ radeon_update_bandwidth_info(rdev); ++ if (rdev->pm.active_crtc_count) ++ radeon_bandwidth_update(rdev); ++ ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; ++ ++ mutex_unlock(&rdev->cp.mutex); ++ mutex_unlock(&rdev->vram_mutex); ++ mutex_unlock(&rdev->ddev->struct_mutex); ++} ++ ++static void radeon_pm_print_states(struct radeon_device *rdev) ++{ ++ int i, j; ++ struct radeon_power_state *power_state; ++ struct radeon_pm_clock_info *clock_info; ++ ++ DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states); ++ for (i = 0; i < rdev->pm.num_power_states; i++) { ++ power_state = &rdev->pm.power_state[i]; ++ DRM_DEBUG("State 
%d: %s\n", i, ++ radeon_pm_state_type_name[power_state->type]); ++ if (i == rdev->pm.default_power_state_index) ++ DRM_DEBUG("\tDefault"); ++ if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) ++ DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes); ++ if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) ++ DRM_DEBUG("\tSingle display only\n"); ++ DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes); ++ for (j = 0; j < power_state->num_clock_modes; j++) { ++ clock_info = &(power_state->clock_info[j]); ++ if (rdev->flags & RADEON_IS_IGP) ++ DRM_DEBUG("\t\t%d e: %d%s\n", ++ j, ++ clock_info->sclk * 10, ++ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); ++ else ++ DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n", ++ j, ++ clock_info->sclk * 10, ++ clock_info->mclk * 10, ++ clock_info->voltage.voltage, ++ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); ++ } ++ } ++} ++ ++static ssize_t radeon_get_pm_profile(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); ++ struct radeon_device *rdev = ddev->dev_private; ++ int cp = rdev->pm.profile; ++ ++ return snprintf(buf, PAGE_SIZE, "%s\n", ++ (cp == PM_PROFILE_AUTO) ? "auto" : ++ (cp == PM_PROFILE_LOW) ? "low" : ++ (cp == PM_PROFILE_HIGH) ? 
"high" : "default"); ++} ++ ++static ssize_t radeon_set_pm_profile(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); ++ struct radeon_device *rdev = ddev->dev_private; ++ ++ mutex_lock(&rdev->pm.mutex); ++ if (rdev->pm.pm_method == PM_METHOD_PROFILE) { ++ if (strncmp("default", buf, strlen("default")) == 0) ++ rdev->pm.profile = PM_PROFILE_DEFAULT; ++ else if (strncmp("auto", buf, strlen("auto")) == 0) ++ rdev->pm.profile = PM_PROFILE_AUTO; ++ else if (strncmp("low", buf, strlen("low")) == 0) ++ rdev->pm.profile = PM_PROFILE_LOW; ++ else if (strncmp("mid", buf, strlen("mid")) == 0) ++ rdev->pm.profile = PM_PROFILE_MID; ++ else if (strncmp("high", buf, strlen("high")) == 0) ++ rdev->pm.profile = PM_PROFILE_HIGH; ++ else { ++ DRM_ERROR("invalid power profile!\n"); ++ goto fail; ++ } ++ radeon_pm_update_profile(rdev); ++ radeon_pm_set_clocks(rdev); ++ } ++fail: ++ mutex_unlock(&rdev->pm.mutex); ++ ++ return count; ++} ++ ++static ssize_t radeon_get_pm_method(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); ++ struct radeon_device *rdev = ddev->dev_private; ++ int pm = rdev->pm.pm_method; ++ ++ return snprintf(buf, PAGE_SIZE, "%s\n", ++ (pm == PM_METHOD_DYNPM) ? 
"dynpm" : "profile"); ++} ++ ++static ssize_t radeon_set_pm_method(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); ++ struct radeon_device *rdev = ddev->dev_private; ++ ++ ++ if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { ++ mutex_lock(&rdev->pm.mutex); ++ rdev->pm.pm_method = PM_METHOD_DYNPM; ++ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; ++ mutex_unlock(&rdev->pm.mutex); ++ } else if (strncmp("profile", buf, strlen("profile")) == 0) { ++ mutex_lock(&rdev->pm.mutex); ++ rdev->pm.pm_method = PM_METHOD_PROFILE; ++ /* disable dynpm */ ++ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; ++ cancel_delayed_work(&rdev->pm.dynpm_idle_work); ++ mutex_unlock(&rdev->pm.mutex); ++ } else { ++ DRM_ERROR("invalid power method!\n"); ++ goto fail; ++ } ++ radeon_pm_compute_clocks(rdev); ++fail: ++ return count; ++} ++ ++static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); ++static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); + +- rdev->pm.current_power_state = rdev->pm.requested_power_state; +- rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; ++void radeon_pm_suspend(struct radeon_device *rdev) ++{ ++ mutex_lock(&rdev->pm.mutex); ++ cancel_delayed_work(&rdev->pm.dynpm_idle_work); ++ mutex_unlock(&rdev->pm.mutex); ++} ++ ++void radeon_pm_resume(struct radeon_device *rdev) ++{ ++ /* asic init will reset the default power state */ ++ mutex_lock(&rdev->pm.mutex); ++ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; ++ rdev->pm.current_clock_mode_index = 0; ++ rdev->pm.current_sclk = rdev->clock.default_sclk; ++ rdev->pm.current_mclk = rdev->clock.default_mclk; ++ rdev->pm.current_vddc = 
rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; ++ mutex_unlock(&rdev->pm.mutex); ++ radeon_pm_compute_clocks(rdev); + } + + int radeon_pm_init(struct radeon_device *rdev) + { +- rdev->pm.state = PM_STATE_DISABLED; +- rdev->pm.planned_action = PM_ACTION_NONE; +- rdev->pm.downclocked = false; ++ int ret; ++ /* default to profile method */ ++ rdev->pm.pm_method = PM_METHOD_PROFILE; ++ rdev->pm.profile = PM_PROFILE_DEFAULT; ++ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; ++ rdev->pm.dynpm_can_upclock = true; ++ rdev->pm.dynpm_can_downclock = true; ++ rdev->pm.current_sclk = rdev->clock.default_sclk; ++ rdev->pm.current_mclk = rdev->clock.default_mclk; + + if (rdev->bios) { + if (rdev->is_atom_bios) + radeon_atombios_get_power_modes(rdev); + else + radeon_combios_get_power_modes(rdev); +- radeon_print_power_mode_info(rdev); ++ radeon_pm_print_states(rdev); ++ radeon_pm_init_profile(rdev); + } + +- if (radeon_debugfs_pm_init(rdev)) { +- DRM_ERROR("Failed to register debugfs file for PM!\n"); +- } ++ if (rdev->pm.num_power_states > 1) { ++ /* where's the best place to put these? 
*/ ++ ret = device_create_file(rdev->dev, &dev_attr_power_profile); ++ if (ret) ++ DRM_ERROR("failed to create device file for power profile\n"); ++ ret = device_create_file(rdev->dev, &dev_attr_power_method); ++ if (ret) ++ DRM_ERROR("failed to create device file for power method\n"); ++ ++#ifdef CONFIG_ACPI ++ rdev->acpi_nb.notifier_call = radeon_acpi_event; ++ register_acpi_notifier(&rdev->acpi_nb); ++#endif ++ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); + +- INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler); ++ if (radeon_debugfs_pm_init(rdev)) { ++ DRM_ERROR("Failed to register debugfs file for PM!\n"); ++ } + +- if (radeon_dynpm != -1 && radeon_dynpm) { +- rdev->pm.state = PM_STATE_PAUSED; +- DRM_INFO("radeon: dynamic power management enabled\n"); ++ DRM_INFO("radeon: power management initialized\n"); + } + +- DRM_INFO("radeon: power management initialized\n"); +- + return 0; + } + + void radeon_pm_fini(struct radeon_device *rdev) + { ++ if (rdev->pm.num_power_states > 1) { ++ mutex_lock(&rdev->pm.mutex); ++ if (rdev->pm.pm_method == PM_METHOD_PROFILE) { ++ rdev->pm.profile = PM_PROFILE_DEFAULT; ++ radeon_pm_update_profile(rdev); ++ radeon_pm_set_clocks(rdev); ++ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { ++ /* cancel work */ ++ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); ++ /* reset default clocks */ ++ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; ++ radeon_pm_set_clocks(rdev); ++ } ++ mutex_unlock(&rdev->pm.mutex); ++ ++ device_remove_file(rdev->dev, &dev_attr_power_profile); ++ device_remove_file(rdev->dev, &dev_attr_power_method); ++#ifdef CONFIG_ACPI ++ unregister_acpi_notifier(&rdev->acpi_nb); ++#endif ++ } ++ + if (rdev->pm.i2c_bus) + radeon_i2c_destroy(rdev->pm.i2c_bus); + } +@@ -266,146 +515,167 @@ void radeon_pm_fini(struct radeon_device *rdev) + void radeon_pm_compute_clocks(struct radeon_device *rdev) + { + struct 
drm_device *ddev = rdev->ddev; +- struct drm_connector *connector; ++ struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; +- int count = 0; + +- if (rdev->pm.state == PM_STATE_DISABLED) ++ if (rdev->pm.num_power_states < 2) + return; + + mutex_lock(&rdev->pm.mutex); + + rdev->pm.active_crtcs = 0; +- list_for_each_entry(connector, +- &ddev->mode_config.connector_list, head) { +- if (connector->encoder && +- connector->encoder->crtc && +- connector->dpms != DRM_MODE_DPMS_OFF) { +- radeon_crtc = to_radeon_crtc(connector->encoder->crtc); ++ rdev->pm.active_crtc_count = 0; ++ list_for_each_entry(crtc, ++ &ddev->mode_config.crtc_list, head) { ++ radeon_crtc = to_radeon_crtc(crtc); ++ if (radeon_crtc->enabled) { + rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); +- ++count; ++ rdev->pm.active_crtc_count++; + } + } + +- if (count > 1) { +- if (rdev->pm.state == PM_STATE_ACTIVE) { +- cancel_delayed_work(&rdev->pm.idle_work); +- +- rdev->pm.state = PM_STATE_PAUSED; +- rdev->pm.planned_action = PM_ACTION_UPCLOCK; +- if (rdev->pm.downclocked) +- radeon_pm_set_clocks(rdev); +- +- DRM_DEBUG("radeon: dynamic power management deactivated\n"); +- } +- } else if (count == 1) { +- /* TODO: Increase clocks if needed for current mode */ +- +- if (rdev->pm.state == PM_STATE_MINIMUM) { +- rdev->pm.state = PM_STATE_ACTIVE; +- rdev->pm.planned_action = PM_ACTION_UPCLOCK; +- radeon_pm_set_clocks(rdev); +- +- queue_delayed_work(rdev->wq, &rdev->pm.idle_work, +- msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); +- } +- else if (rdev->pm.state == PM_STATE_PAUSED) { +- rdev->pm.state = PM_STATE_ACTIVE; +- queue_delayed_work(rdev->wq, &rdev->pm.idle_work, +- msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); +- DRM_DEBUG("radeon: dynamic power management activated\n"); +- } +- } +- else { /* count == 0 */ +- if (rdev->pm.state != PM_STATE_MINIMUM) { +- cancel_delayed_work(&rdev->pm.idle_work); +- +- rdev->pm.state = PM_STATE_MINIMUM; +- rdev->pm.planned_action = PM_ACTION_MINIMUM; +- 
radeon_pm_set_clocks(rdev); ++ if (rdev->pm.pm_method == PM_METHOD_PROFILE) { ++ radeon_pm_update_profile(rdev); ++ radeon_pm_set_clocks(rdev); ++ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { ++ if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { ++ if (rdev->pm.active_crtc_count > 1) { ++ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { ++ cancel_delayed_work(&rdev->pm.dynpm_idle_work); ++ ++ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; ++ radeon_pm_get_dynpm_state(rdev); ++ radeon_pm_set_clocks(rdev); ++ ++ DRM_DEBUG("radeon: dynamic power management deactivated\n"); ++ } ++ } else if (rdev->pm.active_crtc_count == 1) { ++ /* TODO: Increase clocks if needed for current mode */ ++ ++ if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { ++ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; ++ radeon_pm_get_dynpm_state(rdev); ++ radeon_pm_set_clocks(rdev); ++ ++ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, ++ msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); ++ } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { ++ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; ++ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, ++ msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); ++ DRM_DEBUG("radeon: dynamic power management activated\n"); ++ } ++ } else { /* count == 0 */ ++ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { ++ cancel_delayed_work(&rdev->pm.dynpm_idle_work); ++ ++ rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; ++ radeon_pm_get_dynpm_state(rdev); ++ radeon_pm_set_clocks(rdev); ++ } ++ } + } + } + + mutex_unlock(&rdev->pm.mutex); + } + +-static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) ++static bool radeon_pm_in_vbl(struct radeon_device *rdev) + { +- u32 stat_crtc1 = 0, stat_crtc2 = 0; ++ u32 stat_crtc = 0, vbl = 0, position = 0; + bool in_vbl = true; + +- if 
(ASIC_IS_AVIVO(rdev)) { ++ if (ASIC_IS_DCE4(rdev)) { + if (rdev->pm.active_crtcs & (1 << 0)) { +- stat_crtc1 = RREG32(D1CRTC_STATUS); +- if (!(stat_crtc1 & 1)) ++ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + ++ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; ++ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + ++ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; ++ } ++ if (rdev->pm.active_crtcs & (1 << 1)) { ++ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + ++ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; ++ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + ++ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; ++ } ++ if (rdev->pm.active_crtcs & (1 << 2)) { ++ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + ++ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; ++ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + ++ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; ++ } ++ if (rdev->pm.active_crtcs & (1 << 3)) { ++ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + ++ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; ++ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + ++ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; ++ } ++ if (rdev->pm.active_crtcs & (1 << 4)) { ++ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + ++ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; ++ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + ++ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; ++ } ++ if (rdev->pm.active_crtcs & (1 << 5)) { ++ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + ++ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; ++ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + ++ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; ++ } ++ } else if (ASIC_IS_AVIVO(rdev)) { ++ if (rdev->pm.active_crtcs & (1 << 0)) { ++ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; ++ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; ++ } ++ if (rdev->pm.active_crtcs & (1 << 1)) { ++ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; ++ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; ++ } ++ if (position < vbl && position > 1) ++ in_vbl = false; 
++ } else { ++ if (rdev->pm.active_crtcs & (1 << 0)) { ++ stat_crtc = RREG32(RADEON_CRTC_STATUS); ++ if (!(stat_crtc & 1)) + in_vbl = false; + } + if (rdev->pm.active_crtcs & (1 << 1)) { +- stat_crtc2 = RREG32(D2CRTC_STATUS); +- if (!(stat_crtc2 & 1)) ++ stat_crtc = RREG32(RADEON_CRTC2_STATUS); ++ if (!(stat_crtc & 1)) + in_vbl = false; + } + } +- if (in_vbl == false) +- DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1, +- stat_crtc2, finish ? "exit" : "entry"); +- return in_vbl; +-} +-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev) +-{ +- /*radeon_fence_wait_last(rdev);*/ +- switch (rdev->pm.planned_action) { +- case PM_ACTION_UPCLOCK: +- rdev->pm.downclocked = false; +- break; +- case PM_ACTION_DOWNCLOCK: +- rdev->pm.downclocked = true; +- break; +- case PM_ACTION_MINIMUM: +- break; +- case PM_ACTION_NONE: +- DRM_ERROR("%s: PM_ACTION_NONE\n", __func__); +- break; +- } + +- radeon_set_power_state(rdev); +- rdev->pm.planned_action = PM_ACTION_NONE; ++ if (position < vbl && position > 1) ++ in_vbl = false; ++ ++ return in_vbl; + } + +-static void radeon_pm_set_clocks(struct radeon_device *rdev) ++static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) + { +- radeon_get_power_state(rdev, rdev->pm.planned_action); +- mutex_lock(&rdev->cp.mutex); ++ u32 stat_crtc = 0; ++ bool in_vbl = radeon_pm_in_vbl(rdev); + +- if (rdev->pm.active_crtcs & (1 << 0)) { +- rdev->pm.req_vblank |= (1 << 0); +- drm_vblank_get(rdev->ddev, 0); +- } +- if (rdev->pm.active_crtcs & (1 << 1)) { +- rdev->pm.req_vblank |= (1 << 1); +- drm_vblank_get(rdev->ddev, 1); +- } +- radeon_pm_set_clocks_locked(rdev); +- if (rdev->pm.req_vblank & (1 << 0)) { +- rdev->pm.req_vblank &= ~(1 << 0); +- drm_vblank_put(rdev->ddev, 0); +- } +- if (rdev->pm.req_vblank & (1 << 1)) { +- rdev->pm.req_vblank &= ~(1 << 1); +- drm_vblank_put(rdev->ddev, 1); +- } +- +- mutex_unlock(&rdev->cp.mutex); ++ if (in_vbl == false) ++ DRM_DEBUG("not in vbl for pm change 
%08x at %s\n", stat_crtc, ++ finish ? "exit" : "entry"); ++ return in_vbl; + } + +-static void radeon_pm_idle_work_handler(struct work_struct *work) ++static void radeon_dynpm_idle_work_handler(struct work_struct *work) + { + struct radeon_device *rdev; ++ int resched; + rdev = container_of(work, struct radeon_device, +- pm.idle_work.work); ++ pm.dynpm_idle_work.work); + ++ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); + mutex_lock(&rdev->pm.mutex); +- if (rdev->pm.state == PM_STATE_ACTIVE) { ++ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { + unsigned long irq_flags; + int not_processed = 0; + +@@ -421,35 +691,40 @@ static void radeon_pm_idle_work_handler(struct work_struct *work) + read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); + + if (not_processed >= 3) { /* should upclock */ +- if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { +- rdev->pm.planned_action = PM_ACTION_NONE; +- } else if (rdev->pm.planned_action == PM_ACTION_NONE && +- rdev->pm.downclocked) { +- rdev->pm.planned_action = +- PM_ACTION_UPCLOCK; +- rdev->pm.action_timeout = jiffies + ++ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; ++ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && ++ rdev->pm.dynpm_can_upclock) { ++ rdev->pm.dynpm_planned_action = ++ DYNPM_ACTION_UPCLOCK; ++ rdev->pm.dynpm_action_timeout = jiffies + + msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); + } + } else if (not_processed == 0) { /* should downclock */ +- if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { +- rdev->pm.planned_action = PM_ACTION_NONE; +- } else if (rdev->pm.planned_action == PM_ACTION_NONE && +- !rdev->pm.downclocked) { +- rdev->pm.planned_action = +- PM_ACTION_DOWNCLOCK; +- rdev->pm.action_timeout = jiffies + ++ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { ++ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; ++ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && ++ 
rdev->pm.dynpm_can_downclock) { ++ rdev->pm.dynpm_planned_action = ++ DYNPM_ACTION_DOWNCLOCK; ++ rdev->pm.dynpm_action_timeout = jiffies + + msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); + } + } + +- if (rdev->pm.planned_action != PM_ACTION_NONE && +- jiffies > rdev->pm.action_timeout) { ++ /* Note, radeon_pm_set_clocks is called with static_switch set ++ * to false since we want to wait for vbl to avoid flicker. ++ */ ++ if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && ++ jiffies > rdev->pm.dynpm_action_timeout) { ++ radeon_pm_get_dynpm_state(rdev); + radeon_pm_set_clocks(rdev); + } + } + mutex_unlock(&rdev->pm.mutex); ++ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); + +- queue_delayed_work(rdev->wq, &rdev->pm.idle_work, ++ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + } + +@@ -464,12 +739,13 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + +- seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]); + seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); + seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); + seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); + if (rdev->asic->get_memory_clock) + seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); ++ if (rdev->pm.current_vddc) ++ seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); + if (rdev->asic->get_pcie_lanes) + seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); + +diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h +index eabbc9c..c332f46 100644 +--- a/drivers/gpu/drm/radeon/radeon_reg.h ++++ b/drivers/gpu/drm/radeon/radeon_reg.h +@@ -553,7 +553,6 @@ + # define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16) + #define RADEON_CRTC2_CRNT_FRAME 0x0314 + #define 
RADEON_CRTC2_GUI_TRIG_VLINE 0x0318 +-#define RADEON_CRTC2_STATUS 0x03fc + #define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310 + #define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */ + #define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */ +@@ -995,6 +994,7 @@ + # define RADEON_FP_DETECT_MASK (1 << 4) + # define RADEON_CRTC2_VBLANK_MASK (1 << 9) + # define RADEON_FP2_DETECT_MASK (1 << 10) ++# define RADEON_GUI_IDLE_MASK (1 << 19) + # define RADEON_SW_INT_ENABLE (1 << 25) + #define RADEON_GEN_INT_STATUS 0x0044 + # define AVIVO_DISPLAY_INT_STATUS (1 << 0) +@@ -1006,6 +1006,8 @@ + # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) + # define RADEON_FP2_DETECT_STAT (1 << 10) + # define RADEON_FP2_DETECT_STAT_ACK (1 << 10) ++# define RADEON_GUI_IDLE_STAT (1 << 19) ++# define RADEON_GUI_IDLE_STAT_ACK (1 << 19) + # define RADEON_SW_INT_FIRE (1 << 26) + # define RADEON_SW_INT_TEST (1 << 25) + # define RADEON_SW_INT_TEST_ACK (1 << 25) +diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c +index f6e1e8d..261e98a 100644 +--- a/drivers/gpu/drm/radeon/radeon_ring.c ++++ b/drivers/gpu/drm/radeon/radeon_ring.c +@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev) + void radeon_ib_pool_fini(struct radeon_device *rdev) + { + int r; ++ struct radeon_bo *robj; + + if (!rdev->ib_pool.ready) { + return; + } + mutex_lock(&rdev->ib_pool.mutex); + radeon_ib_bogus_cleanup(rdev); ++ robj = rdev->ib_pool.robj; ++ rdev->ib_pool.robj = NULL; ++ mutex_unlock(&rdev->ib_pool.mutex); + +- if (rdev->ib_pool.robj) { +- r = radeon_bo_reserve(rdev->ib_pool.robj, false); ++ if (robj) { ++ r = radeon_bo_reserve(robj, false); + if (likely(r == 0)) { +- radeon_bo_kunmap(rdev->ib_pool.robj); +- radeon_bo_unpin(rdev->ib_pool.robj); +- radeon_bo_unreserve(rdev->ib_pool.robj); ++ radeon_bo_kunmap(robj); ++ radeon_bo_unpin(robj); ++ radeon_bo_unreserve(robj); + } +- radeon_bo_unref(&rdev->ib_pool.robj); +- rdev->ib_pool.robj = NULL; ++ radeon_bo_unref(&robj); + } +- 
mutex_unlock(&rdev->ib_pool.mutex); + } + + +@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev) + } + } + +-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) ++int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw) + { + int r; + + /* Align requested size with padding so unlock_commit can + * pad safely */ + ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask; +- mutex_lock(&rdev->cp.mutex); + while (ndw > (rdev->cp.ring_free_dw - 1)) { + radeon_ring_free_size(rdev); + if (ndw < rdev->cp.ring_free_dw) { + break; + } + r = radeon_fence_wait_next(rdev); +- if (r) { +- mutex_unlock(&rdev->cp.mutex); ++ if (r) + return r; +- } + } + rdev->cp.count_dw = ndw; + rdev->cp.wptr_old = rdev->cp.wptr; + return 0; + } + +-void radeon_ring_unlock_commit(struct radeon_device *rdev) ++int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) ++{ ++ int r; ++ ++ mutex_lock(&rdev->cp.mutex); ++ r = radeon_ring_alloc(rdev, ndw); ++ if (r) { ++ mutex_unlock(&rdev->cp.mutex); ++ return r; ++ } ++ return 0; ++} ++ ++void radeon_ring_commit(struct radeon_device *rdev) + { + unsigned count_dw_pad; + unsigned i; +@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev) + } + DRM_MEMORYBARRIER(); + radeon_cp_commit(rdev); ++} ++ ++void radeon_ring_unlock_commit(struct radeon_device *rdev) ++{ ++ radeon_ring_commit(rdev); + mutex_unlock(&rdev->cp.mutex); + } + +@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) + void radeon_ring_fini(struct radeon_device *rdev) + { + int r; ++ struct radeon_bo *ring_obj; + + mutex_lock(&rdev->cp.mutex); +- if (rdev->cp.ring_obj) { +- r = radeon_bo_reserve(rdev->cp.ring_obj, false); ++ ring_obj = rdev->cp.ring_obj; ++ rdev->cp.ring = NULL; ++ rdev->cp.ring_obj = NULL; ++ mutex_unlock(&rdev->cp.mutex); ++ ++ if (ring_obj) { ++ r = radeon_bo_reserve(ring_obj, false); + if (likely(r == 0)) { +- radeon_bo_kunmap(rdev->cp.ring_obj); +- 
radeon_bo_unpin(rdev->cp.ring_obj); +- radeon_bo_unreserve(rdev->cp.ring_obj); ++ radeon_bo_kunmap(ring_obj); ++ radeon_bo_unpin(ring_obj); ++ radeon_bo_unreserve(ring_obj); + } +- radeon_bo_unref(&rdev->cp.ring_obj); +- rdev->cp.ring = NULL; +- rdev->cp.ring_obj = NULL; ++ radeon_bo_unref(&ring_obj); + } +- mutex_unlock(&rdev->cp.mutex); + } + + +diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c +index cc5316d..b3ba44c 100644 +--- a/drivers/gpu/drm/radeon/radeon_state.c ++++ b/drivers/gpu/drm/radeon/radeon_state.c +@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, + flags |= RADEON_FRONT; + } + if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { +- if (!dev_priv->have_z_offset) ++ if (!dev_priv->have_z_offset) { + printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); +- flags &= ~(RADEON_DEPTH | RADEON_STENCIL); ++ flags &= ~(RADEON_DEPTH | RADEON_STENCIL); ++ } + } + + if (flags & (RADEON_FRONT | RADEON_BACK)) { +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index d031b68..e9918d8 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + (unsigned)type); + return -EINVAL; + } +- man->io_offset = rdev->mc.agp_base; +- man->io_size = rdev->mc.gtt_size; +- man->io_addr = NULL; + if (!rdev->ddev->agp->cant_use_aperture) +- man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | +- TTM_MEMTYPE_FLAG_MAPPABLE; ++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; +- } else +-#endif +- { +- man->io_offset = 0; +- man->io_size = 0; +- man->io_addr = NULL; + } ++#endif + break; + case TTM_PL_VRAM: + /* 
"On-card" video ram */ + man->gpu_offset = rdev->mc.vram_start; + man->flags = TTM_MEMTYPE_FLAG_FIXED | +- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | + TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; +- man->io_addr = NULL; +- man->io_offset = rdev->mc.aper_base; +- man->io_size = rdev->mc.aper_size; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); +@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo, + } + + static int radeon_move_blit(struct ttm_buffer_object *bo, +- bool evict, int no_wait, +- struct ttm_mem_reg *new_mem, +- struct ttm_mem_reg *old_mem) ++ bool evict, int no_wait_reserve, bool no_wait_gpu, ++ struct ttm_mem_reg *new_mem, ++ struct ttm_mem_reg *old_mem) + { + struct radeon_device *rdev; + uint64_t old_start, new_start; +@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, + r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); + /* FIXME: handle copy error */ + r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, +- evict, no_wait, new_mem); ++ evict, no_wait_reserve, no_wait_gpu, new_mem); + radeon_fence_unref(&fence); + return r; + } + + static int radeon_move_vram_ram(struct ttm_buffer_object *bo, +- bool evict, bool interruptible, bool no_wait, ++ bool evict, bool interruptible, ++ bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) + { + struct radeon_device *rdev; +@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, + placement.busy_placement = &placements; + placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, +- interruptible, no_wait); ++ interruptible, no_wait_reserve, no_wait_gpu); + if (unlikely(r)) { + return r; + } +@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, + if (unlikely(r)) { + goto out_cleanup; + } +- r = 
radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem); ++ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); + if (unlikely(r)) { + goto out_cleanup; + } +- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem); ++ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); + out_cleanup: + if (tmp_mem.mm_node) { + struct ttm_bo_global *glob = rdev->mman.bdev.glob; +@@ -349,7 +338,8 @@ out_cleanup: + } + + static int radeon_move_ram_vram(struct ttm_buffer_object *bo, +- bool evict, bool interruptible, bool no_wait, ++ bool evict, bool interruptible, ++ bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) + { + struct radeon_device *rdev; +@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; +- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait); ++ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); + if (unlikely(r)) { + return r; + } +- r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem); ++ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); + if (unlikely(r)) { + goto out_cleanup; + } +- r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem); ++ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); + if (unlikely(r)) { + goto out_cleanup; + } +@@ -394,8 +384,9 @@ out_cleanup: + } + + static int radeon_bo_move(struct ttm_buffer_object *bo, +- bool evict, bool interruptible, bool no_wait, +- struct ttm_mem_reg *new_mem) ++ bool evict, bool interruptible, ++ bool no_wait_reserve, bool no_wait_gpu, ++ struct ttm_mem_reg *new_mem) + { + struct radeon_device *rdev; + struct ttm_mem_reg *old_mem = &bo->mem; +@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, + if (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == 
TTM_PL_SYSTEM) { + r = radeon_move_vram_ram(bo, evict, interruptible, +- no_wait, new_mem); ++ no_wait_reserve, no_wait_gpu, new_mem); + } else if (old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) { + r = radeon_move_ram_vram(bo, evict, interruptible, +- no_wait, new_mem); ++ no_wait_reserve, no_wait_gpu, new_mem); + } else { +- r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); ++ r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); + } + + if (r) { + memcpy: +- r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + } +- + return r; + } + ++static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ++{ ++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; ++ struct radeon_device *rdev = radeon_get_rdev(bdev); ++ ++ mem->bus.addr = NULL; ++ mem->bus.offset = 0; ++ mem->bus.size = mem->num_pages << PAGE_SHIFT; ++ mem->bus.base = 0; ++ mem->bus.is_iomem = false; ++ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) ++ return -EINVAL; ++ switch (mem->mem_type) { ++ case TTM_PL_SYSTEM: ++ /* system memory */ ++ return 0; ++ case TTM_PL_TT: ++#if __OS_HAS_AGP ++ if (rdev->flags & RADEON_IS_AGP) { ++ /* RADEON_IS_AGP is set only if AGP is active */ ++ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; ++ mem->bus.base = rdev->mc.agp_base; ++ mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; ++ } ++#endif ++ break; ++ case TTM_PL_VRAM: ++ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; ++ /* check if it's visible */ ++ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) ++ return -EINVAL; ++ mem->bus.base = rdev->mc.aper_base; ++ mem->bus.is_iomem = true; ++ break; ++ default: ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ++{ ++} ++ + static int radeon_sync_obj_wait(void *sync_obj, 
void *sync_arg, + bool lazy, bool interruptible) + { +@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = { + .sync_obj_ref = &radeon_sync_obj_ref, + .move_notify = &radeon_bo_move_notify, + .fault_reserve_notify = &radeon_bo_fault_reserve_notify, ++ .io_mem_reserve = &radeon_ttm_io_mem_reserve, ++ .io_mem_free = &radeon_ttm_io_mem_free, + }; + + int radeon_ttm_init(struct radeon_device *rdev) +@@ -571,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL; + static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + { + struct ttm_buffer_object *bo; ++ struct radeon_device *rdev; + int r; + +- bo = (struct ttm_buffer_object *)vma->vm_private_data; ++ bo = (struct ttm_buffer_object *)vma->vm_private_data; + if (bo == NULL) { + return VM_FAULT_NOPAGE; + } ++ rdev = radeon_get_rdev(bo->bdev); ++ mutex_lock(&rdev->vram_mutex); + r = ttm_vm_ops->fault(vma, vmf); ++ mutex_unlock(&rdev->vram_mutex); + return r; + } + +@@ -745,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) + static int radeon_ttm_debugfs_init(struct radeon_device *rdev) + { + #if defined(CONFIG_DEBUG_FS) +- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES]; +- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32]; ++ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1]; ++ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32]; + unsigned i; + + for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { +@@ -763,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) + radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; + + } +- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES); ++ /* Add ttm page pool to debugfs */ ++ sprintf(radeon_mem_types_names[i], "ttm_page_pool"); ++ radeon_mem_types_list[i].name = radeon_mem_types_names[i]; ++ radeon_mem_types_list[i].show = 
&ttm_page_alloc_debugfs; ++ radeon_mem_types_list[i].driver_features = 0; ++ radeon_mem_types_list[i].data = NULL; ++ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1); + + #endif + return 0; +diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen +new file mode 100644 +index 0000000..b5c757f +--- /dev/null ++++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen +@@ -0,0 +1,611 @@ ++evergreen 0x9400 ++0x00008040 WAIT_UNTIL ++0x00008044 WAIT_UNTIL_POLL_CNTL ++0x00008048 WAIT_UNTIL_POLL_MASK ++0x0000804c WAIT_UNTIL_POLL_REFDATA ++0x000088B0 VGT_VTX_VECT_EJECT_REG ++0x000088C4 VGT_CACHE_INVALIDATION ++0x000088D4 VGT_GS_VERTEX_REUSE ++0x00008958 VGT_PRIMITIVE_TYPE ++0x0000895C VGT_INDEX_TYPE ++0x00008970 VGT_NUM_INDICES ++0x00008974 VGT_NUM_INSTANCES ++0x00008990 VGT_COMPUTE_DIM_X ++0x00008994 VGT_COMPUTE_DIM_Y ++0x00008998 VGT_COMPUTE_DIM_Z ++0x0000899C VGT_COMPUTE_START_X ++0x000089A0 VGT_COMPUTE_START_Y ++0x000089A4 VGT_COMPUTE_START_Z ++0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE ++0x00008A14 PA_CL_ENHANCE ++0x00008A60 PA_SC_LINE_STIPPLE_VALUE ++0x00008B10 PA_SC_LINE_STIPPLE_STATE ++0x00008BF0 PA_SC_ENHANCE ++0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ ++0x00008C00 SQ_CONFIG ++0x00008C04 SQ_GPR_RESOURCE_MGMT_1 ++0x00008C08 SQ_GPR_RESOURCE_MGMT_2 ++0x00008C0C SQ_GPR_RESOURCE_MGMT_3 ++0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1 ++0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2 ++0x00008C18 SQ_THREAD_RESOURCE_MGMT ++0x00008C1C SQ_THREAD_RESOURCE_MGMT_2 ++0x00008C20 SQ_STACK_RESOURCE_MGMT_1 ++0x00008C24 SQ_STACK_RESOURCE_MGMT_2 ++0x00008C28 SQ_STACK_RESOURCE_MGMT_3 ++0x00008DF8 SQ_CONST_MEM_BASE ++0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS ++0x00009100 SPI_CONFIG_CNTL ++0x0000913C SPI_CONFIG_CNTL_1 ++0x00009700 VC_CNTL ++0x00009714 VC_ENHANCE ++0x00009830 DB_DEBUG ++0x00009834 DB_DEBUG2 ++0x00009838 DB_DEBUG3 ++0x0000983C DB_DEBUG4 ++0x00009854 DB_WATERMARKS ++0x0000A400 TD_PS_BORDER_COLOR_INDEX ++0x0000A404 
TD_PS_BORDER_COLOR_RED ++0x0000A408 TD_PS_BORDER_COLOR_GREEN ++0x0000A40C TD_PS_BORDER_COLOR_BLUE ++0x0000A410 TD_PS_BORDER_COLOR_ALPHA ++0x0000A414 TD_VS_BORDER_COLOR_INDEX ++0x0000A418 TD_VS_BORDER_COLOR_RED ++0x0000A41C TD_VS_BORDER_COLOR_GREEN ++0x0000A420 TD_VS_BORDER_COLOR_BLUE ++0x0000A424 TD_VS_BORDER_COLOR_ALPHA ++0x0000A428 TD_GS_BORDER_COLOR_INDEX ++0x0000A42C TD_GS_BORDER_COLOR_RED ++0x0000A430 TD_GS_BORDER_COLOR_GREEN ++0x0000A434 TD_GS_BORDER_COLOR_BLUE ++0x0000A438 TD_GS_BORDER_COLOR_ALPHA ++0x0000A43C TD_HS_BORDER_COLOR_INDEX ++0x0000A440 TD_HS_BORDER_COLOR_RED ++0x0000A444 TD_HS_BORDER_COLOR_GREEN ++0x0000A448 TD_HS_BORDER_COLOR_BLUE ++0x0000A44C TD_HS_BORDER_COLOR_ALPHA ++0x0000A450 TD_LS_BORDER_COLOR_INDEX ++0x0000A454 TD_LS_BORDER_COLOR_RED ++0x0000A458 TD_LS_BORDER_COLOR_GREEN ++0x0000A45C TD_LS_BORDER_COLOR_BLUE ++0x0000A460 TD_LS_BORDER_COLOR_ALPHA ++0x0000A464 TD_CS_BORDER_COLOR_INDEX ++0x0000A468 TD_CS_BORDER_COLOR_RED ++0x0000A46C TD_CS_BORDER_COLOR_GREEN ++0x0000A470 TD_CS_BORDER_COLOR_BLUE ++0x0000A474 TD_CS_BORDER_COLOR_ALPHA ++0x00028000 DB_RENDER_CONTROL ++0x00028004 DB_COUNT_CONTROL ++0x0002800C DB_RENDER_OVERRIDE ++0x00028010 DB_RENDER_OVERRIDE2 ++0x00028028 DB_STENCIL_CLEAR ++0x0002802C DB_DEPTH_CLEAR ++0x00028034 PA_SC_SCREEN_SCISSOR_BR ++0x00028030 PA_SC_SCREEN_SCISSOR_TL ++0x0002805C DB_DEPTH_SLICE ++0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 ++0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 ++0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 ++0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3 ++0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4 ++0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5 ++0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6 ++0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7 ++0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8 ++0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9 ++0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10 ++0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11 ++0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12 ++0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13 ++0x00028178 
SQ_ALU_CONST_BUFFER_SIZE_PS_14 ++0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15 ++0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0 ++0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1 ++0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2 ++0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3 ++0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4 ++0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5 ++0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6 ++0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7 ++0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8 ++0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9 ++0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10 ++0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11 ++0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12 ++0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 ++0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 ++0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 ++0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 ++0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 ++0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 ++0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3 ++0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4 ++0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5 ++0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6 ++0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7 ++0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8 ++0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9 ++0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10 ++0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11 ++0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12 ++0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13 ++0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14 ++0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15 ++0x00028200 PA_SC_WINDOW_OFFSET ++0x00028204 PA_SC_WINDOW_SCISSOR_TL ++0x00028208 PA_SC_WINDOW_SCISSOR_BR ++0x0002820C PA_SC_CLIPRECT_RULE ++0x00028210 PA_SC_CLIPRECT_0_TL ++0x00028214 PA_SC_CLIPRECT_0_BR ++0x00028218 PA_SC_CLIPRECT_1_TL ++0x0002821C PA_SC_CLIPRECT_1_BR ++0x00028220 PA_SC_CLIPRECT_2_TL ++0x00028224 PA_SC_CLIPRECT_2_BR ++0x00028228 PA_SC_CLIPRECT_3_TL ++0x0002822C PA_SC_CLIPRECT_3_BR ++0x00028230 PA_SC_EDGERULE ++0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET ++0x00028240 PA_SC_GENERIC_SCISSOR_TL ++0x00028244 
PA_SC_GENERIC_SCISSOR_BR ++0x00028250 PA_SC_VPORT_SCISSOR_0_TL ++0x00028254 PA_SC_VPORT_SCISSOR_0_BR ++0x00028258 PA_SC_VPORT_SCISSOR_1_TL ++0x0002825C PA_SC_VPORT_SCISSOR_1_BR ++0x00028260 PA_SC_VPORT_SCISSOR_2_TL ++0x00028264 PA_SC_VPORT_SCISSOR_2_BR ++0x00028268 PA_SC_VPORT_SCISSOR_3_TL ++0x0002826C PA_SC_VPORT_SCISSOR_3_BR ++0x00028270 PA_SC_VPORT_SCISSOR_4_TL ++0x00028274 PA_SC_VPORT_SCISSOR_4_BR ++0x00028278 PA_SC_VPORT_SCISSOR_5_TL ++0x0002827C PA_SC_VPORT_SCISSOR_5_BR ++0x00028280 PA_SC_VPORT_SCISSOR_6_TL ++0x00028284 PA_SC_VPORT_SCISSOR_6_BR ++0x00028288 PA_SC_VPORT_SCISSOR_7_TL ++0x0002828C PA_SC_VPORT_SCISSOR_7_BR ++0x00028290 PA_SC_VPORT_SCISSOR_8_TL ++0x00028294 PA_SC_VPORT_SCISSOR_8_BR ++0x00028298 PA_SC_VPORT_SCISSOR_9_TL ++0x0002829C PA_SC_VPORT_SCISSOR_9_BR ++0x000282A0 PA_SC_VPORT_SCISSOR_10_TL ++0x000282A4 PA_SC_VPORT_SCISSOR_10_BR ++0x000282A8 PA_SC_VPORT_SCISSOR_11_TL ++0x000282AC PA_SC_VPORT_SCISSOR_11_BR ++0x000282B0 PA_SC_VPORT_SCISSOR_12_TL ++0x000282B4 PA_SC_VPORT_SCISSOR_12_BR ++0x000282B8 PA_SC_VPORT_SCISSOR_13_TL ++0x000282BC PA_SC_VPORT_SCISSOR_13_BR ++0x000282C0 PA_SC_VPORT_SCISSOR_14_TL ++0x000282C4 PA_SC_VPORT_SCISSOR_14_BR ++0x000282C8 PA_SC_VPORT_SCISSOR_15_TL ++0x000282CC PA_SC_VPORT_SCISSOR_15_BR ++0x000282D0 PA_SC_VPORT_ZMIN_0 ++0x000282D4 PA_SC_VPORT_ZMAX_0 ++0x000282D8 PA_SC_VPORT_ZMIN_1 ++0x000282DC PA_SC_VPORT_ZMAX_1 ++0x000282E0 PA_SC_VPORT_ZMIN_2 ++0x000282E4 PA_SC_VPORT_ZMAX_2 ++0x000282E8 PA_SC_VPORT_ZMIN_3 ++0x000282EC PA_SC_VPORT_ZMAX_3 ++0x000282F0 PA_SC_VPORT_ZMIN_4 ++0x000282F4 PA_SC_VPORT_ZMAX_4 ++0x000282F8 PA_SC_VPORT_ZMIN_5 ++0x000282FC PA_SC_VPORT_ZMAX_5 ++0x00028300 PA_SC_VPORT_ZMIN_6 ++0x00028304 PA_SC_VPORT_ZMAX_6 ++0x00028308 PA_SC_VPORT_ZMIN_7 ++0x0002830C PA_SC_VPORT_ZMAX_7 ++0x00028310 PA_SC_VPORT_ZMIN_8 ++0x00028314 PA_SC_VPORT_ZMAX_8 ++0x00028318 PA_SC_VPORT_ZMIN_9 ++0x0002831C PA_SC_VPORT_ZMAX_9 ++0x00028320 PA_SC_VPORT_ZMIN_10 ++0x00028324 PA_SC_VPORT_ZMAX_10 ++0x00028328 PA_SC_VPORT_ZMIN_11 
++0x0002832C PA_SC_VPORT_ZMAX_11 ++0x00028330 PA_SC_VPORT_ZMIN_12 ++0x00028334 PA_SC_VPORT_ZMAX_12 ++0x00028338 PA_SC_VPORT_ZMIN_13 ++0x0002833C PA_SC_VPORT_ZMAX_13 ++0x00028340 PA_SC_VPORT_ZMIN_14 ++0x00028344 PA_SC_VPORT_ZMAX_14 ++0x00028348 PA_SC_VPORT_ZMIN_15 ++0x0002834C PA_SC_VPORT_ZMAX_15 ++0x00028350 SX_MISC ++0x00028380 SQ_VTX_SEMANTIC_0 ++0x00028384 SQ_VTX_SEMANTIC_1 ++0x00028388 SQ_VTX_SEMANTIC_2 ++0x0002838C SQ_VTX_SEMANTIC_3 ++0x00028390 SQ_VTX_SEMANTIC_4 ++0x00028394 SQ_VTX_SEMANTIC_5 ++0x00028398 SQ_VTX_SEMANTIC_6 ++0x0002839C SQ_VTX_SEMANTIC_7 ++0x000283A0 SQ_VTX_SEMANTIC_8 ++0x000283A4 SQ_VTX_SEMANTIC_9 ++0x000283A8 SQ_VTX_SEMANTIC_10 ++0x000283AC SQ_VTX_SEMANTIC_11 ++0x000283B0 SQ_VTX_SEMANTIC_12 ++0x000283B4 SQ_VTX_SEMANTIC_13 ++0x000283B8 SQ_VTX_SEMANTIC_14 ++0x000283BC SQ_VTX_SEMANTIC_15 ++0x000283C0 SQ_VTX_SEMANTIC_16 ++0x000283C4 SQ_VTX_SEMANTIC_17 ++0x000283C8 SQ_VTX_SEMANTIC_18 ++0x000283CC SQ_VTX_SEMANTIC_19 ++0x000283D0 SQ_VTX_SEMANTIC_20 ++0x000283D4 SQ_VTX_SEMANTIC_21 ++0x000283D8 SQ_VTX_SEMANTIC_22 ++0x000283DC SQ_VTX_SEMANTIC_23 ++0x000283E0 SQ_VTX_SEMANTIC_24 ++0x000283E4 SQ_VTX_SEMANTIC_25 ++0x000283E8 SQ_VTX_SEMANTIC_26 ++0x000283EC SQ_VTX_SEMANTIC_27 ++0x000283F0 SQ_VTX_SEMANTIC_28 ++0x000283F4 SQ_VTX_SEMANTIC_29 ++0x000283F8 SQ_VTX_SEMANTIC_30 ++0x000283FC SQ_VTX_SEMANTIC_31 ++0x00028400 VGT_MAX_VTX_INDX ++0x00028404 VGT_MIN_VTX_INDX ++0x00028408 VGT_INDX_OFFSET ++0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX ++0x00028410 SX_ALPHA_TEST_CONTROL ++0x00028414 CB_BLEND_RED ++0x00028418 CB_BLEND_GREEN ++0x0002841C CB_BLEND_BLUE ++0x00028420 CB_BLEND_ALPHA ++0x00028430 DB_STENCILREFMASK ++0x00028434 DB_STENCILREFMASK_BF ++0x00028438 SX_ALPHA_REF ++0x0002843C PA_CL_VPORT_XSCALE_0 ++0x00028440 PA_CL_VPORT_XOFFSET_0 ++0x00028444 PA_CL_VPORT_YSCALE_0 ++0x00028448 PA_CL_VPORT_YOFFSET_0 ++0x0002844C PA_CL_VPORT_ZSCALE_0 ++0x00028450 PA_CL_VPORT_ZOFFSET_0 ++0x00028454 PA_CL_VPORT_XSCALE_1 ++0x00028458 PA_CL_VPORT_XOFFSET_1 ++0x0002845C 
PA_CL_VPORT_YSCALE_1 ++0x00028460 PA_CL_VPORT_YOFFSET_1 ++0x00028464 PA_CL_VPORT_ZSCALE_1 ++0x00028468 PA_CL_VPORT_ZOFFSET_1 ++0x0002846C PA_CL_VPORT_XSCALE_2 ++0x00028470 PA_CL_VPORT_XOFFSET_2 ++0x00028474 PA_CL_VPORT_YSCALE_2 ++0x00028478 PA_CL_VPORT_YOFFSET_2 ++0x0002847C PA_CL_VPORT_ZSCALE_2 ++0x00028480 PA_CL_VPORT_ZOFFSET_2 ++0x00028484 PA_CL_VPORT_XSCALE_3 ++0x00028488 PA_CL_VPORT_XOFFSET_3 ++0x0002848C PA_CL_VPORT_YSCALE_3 ++0x00028490 PA_CL_VPORT_YOFFSET_3 ++0x00028494 PA_CL_VPORT_ZSCALE_3 ++0x00028498 PA_CL_VPORT_ZOFFSET_3 ++0x0002849C PA_CL_VPORT_XSCALE_4 ++0x000284A0 PA_CL_VPORT_XOFFSET_4 ++0x000284A4 PA_CL_VPORT_YSCALE_4 ++0x000284A8 PA_CL_VPORT_YOFFSET_4 ++0x000284AC PA_CL_VPORT_ZSCALE_4 ++0x000284B0 PA_CL_VPORT_ZOFFSET_4 ++0x000284B4 PA_CL_VPORT_XSCALE_5 ++0x000284B8 PA_CL_VPORT_XOFFSET_5 ++0x000284BC PA_CL_VPORT_YSCALE_5 ++0x000284C0 PA_CL_VPORT_YOFFSET_5 ++0x000284C4 PA_CL_VPORT_ZSCALE_5 ++0x000284C8 PA_CL_VPORT_ZOFFSET_5 ++0x000284CC PA_CL_VPORT_XSCALE_6 ++0x000284D0 PA_CL_VPORT_XOFFSET_6 ++0x000284D4 PA_CL_VPORT_YSCALE_6 ++0x000284D8 PA_CL_VPORT_YOFFSET_6 ++0x000284DC PA_CL_VPORT_ZSCALE_6 ++0x000284E0 PA_CL_VPORT_ZOFFSET_6 ++0x000284E4 PA_CL_VPORT_XSCALE_7 ++0x000284E8 PA_CL_VPORT_XOFFSET_7 ++0x000284EC PA_CL_VPORT_YSCALE_7 ++0x000284F0 PA_CL_VPORT_YOFFSET_7 ++0x000284F4 PA_CL_VPORT_ZSCALE_7 ++0x000284F8 PA_CL_VPORT_ZOFFSET_7 ++0x000284FC PA_CL_VPORT_XSCALE_8 ++0x00028500 PA_CL_VPORT_XOFFSET_8 ++0x00028504 PA_CL_VPORT_YSCALE_8 ++0x00028508 PA_CL_VPORT_YOFFSET_8 ++0x0002850C PA_CL_VPORT_ZSCALE_8 ++0x00028510 PA_CL_VPORT_ZOFFSET_8 ++0x00028514 PA_CL_VPORT_XSCALE_9 ++0x00028518 PA_CL_VPORT_XOFFSET_9 ++0x0002851C PA_CL_VPORT_YSCALE_9 ++0x00028520 PA_CL_VPORT_YOFFSET_9 ++0x00028524 PA_CL_VPORT_ZSCALE_9 ++0x00028528 PA_CL_VPORT_ZOFFSET_9 ++0x0002852C PA_CL_VPORT_XSCALE_10 ++0x00028530 PA_CL_VPORT_XOFFSET_10 ++0x00028534 PA_CL_VPORT_YSCALE_10 ++0x00028538 PA_CL_VPORT_YOFFSET_10 ++0x0002853C PA_CL_VPORT_ZSCALE_10 ++0x00028540 PA_CL_VPORT_ZOFFSET_10 
++0x00028544 PA_CL_VPORT_XSCALE_11 ++0x00028548 PA_CL_VPORT_XOFFSET_11 ++0x0002854C PA_CL_VPORT_YSCALE_11 ++0x00028550 PA_CL_VPORT_YOFFSET_11 ++0x00028554 PA_CL_VPORT_ZSCALE_11 ++0x00028558 PA_CL_VPORT_ZOFFSET_11 ++0x0002855C PA_CL_VPORT_XSCALE_12 ++0x00028560 PA_CL_VPORT_XOFFSET_12 ++0x00028564 PA_CL_VPORT_YSCALE_12 ++0x00028568 PA_CL_VPORT_YOFFSET_12 ++0x0002856C PA_CL_VPORT_ZSCALE_12 ++0x00028570 PA_CL_VPORT_ZOFFSET_12 ++0x00028574 PA_CL_VPORT_XSCALE_13 ++0x00028578 PA_CL_VPORT_XOFFSET_13 ++0x0002857C PA_CL_VPORT_YSCALE_13 ++0x00028580 PA_CL_VPORT_YOFFSET_13 ++0x00028584 PA_CL_VPORT_ZSCALE_13 ++0x00028588 PA_CL_VPORT_ZOFFSET_13 ++0x0002858C PA_CL_VPORT_XSCALE_14 ++0x00028590 PA_CL_VPORT_XOFFSET_14 ++0x00028594 PA_CL_VPORT_YSCALE_14 ++0x00028598 PA_CL_VPORT_YOFFSET_14 ++0x0002859C PA_CL_VPORT_ZSCALE_14 ++0x000285A0 PA_CL_VPORT_ZOFFSET_14 ++0x000285A4 PA_CL_VPORT_XSCALE_15 ++0x000285A8 PA_CL_VPORT_XOFFSET_15 ++0x000285AC PA_CL_VPORT_YSCALE_15 ++0x000285B0 PA_CL_VPORT_YOFFSET_15 ++0x000285B4 PA_CL_VPORT_ZSCALE_15 ++0x000285B8 PA_CL_VPORT_ZOFFSET_15 ++0x000285BC PA_CL_UCP_0_X ++0x000285C0 PA_CL_UCP_0_Y ++0x000285C4 PA_CL_UCP_0_Z ++0x000285C8 PA_CL_UCP_0_W ++0x000285CC PA_CL_UCP_1_X ++0x000285D0 PA_CL_UCP_1_Y ++0x000285D4 PA_CL_UCP_1_Z ++0x000285D8 PA_CL_UCP_1_W ++0x000285DC PA_CL_UCP_2_X ++0x000285E0 PA_CL_UCP_2_Y ++0x000285E4 PA_CL_UCP_2_Z ++0x000285E8 PA_CL_UCP_2_W ++0x000285EC PA_CL_UCP_3_X ++0x000285F0 PA_CL_UCP_3_Y ++0x000285F4 PA_CL_UCP_3_Z ++0x000285F8 PA_CL_UCP_3_W ++0x000285FC PA_CL_UCP_4_X ++0x00028600 PA_CL_UCP_4_Y ++0x00028604 PA_CL_UCP_4_Z ++0x00028608 PA_CL_UCP_4_W ++0x0002860C PA_CL_UCP_5_X ++0x00028610 PA_CL_UCP_5_Y ++0x00028614 PA_CL_UCP_5_Z ++0x00028618 PA_CL_UCP_5_W ++0x0002861C SPI_VS_OUT_ID_0 ++0x00028620 SPI_VS_OUT_ID_1 ++0x00028624 SPI_VS_OUT_ID_2 ++0x00028628 SPI_VS_OUT_ID_3 ++0x0002862C SPI_VS_OUT_ID_4 ++0x00028630 SPI_VS_OUT_ID_5 ++0x00028634 SPI_VS_OUT_ID_6 ++0x00028638 SPI_VS_OUT_ID_7 ++0x0002863C SPI_VS_OUT_ID_8 ++0x00028640 
SPI_VS_OUT_ID_9 ++0x00028644 SPI_PS_INPUT_CNTL_0 ++0x00028648 SPI_PS_INPUT_CNTL_1 ++0x0002864C SPI_PS_INPUT_CNTL_2 ++0x00028650 SPI_PS_INPUT_CNTL_3 ++0x00028654 SPI_PS_INPUT_CNTL_4 ++0x00028658 SPI_PS_INPUT_CNTL_5 ++0x0002865C SPI_PS_INPUT_CNTL_6 ++0x00028660 SPI_PS_INPUT_CNTL_7 ++0x00028664 SPI_PS_INPUT_CNTL_8 ++0x00028668 SPI_PS_INPUT_CNTL_9 ++0x0002866C SPI_PS_INPUT_CNTL_10 ++0x00028670 SPI_PS_INPUT_CNTL_11 ++0x00028674 SPI_PS_INPUT_CNTL_12 ++0x00028678 SPI_PS_INPUT_CNTL_13 ++0x0002867C SPI_PS_INPUT_CNTL_14 ++0x00028680 SPI_PS_INPUT_CNTL_15 ++0x00028684 SPI_PS_INPUT_CNTL_16 ++0x00028688 SPI_PS_INPUT_CNTL_17 ++0x0002868C SPI_PS_INPUT_CNTL_18 ++0x00028690 SPI_PS_INPUT_CNTL_19 ++0x00028694 SPI_PS_INPUT_CNTL_20 ++0x00028698 SPI_PS_INPUT_CNTL_21 ++0x0002869C SPI_PS_INPUT_CNTL_22 ++0x000286A0 SPI_PS_INPUT_CNTL_23 ++0x000286A4 SPI_PS_INPUT_CNTL_24 ++0x000286A8 SPI_PS_INPUT_CNTL_25 ++0x000286AC SPI_PS_INPUT_CNTL_26 ++0x000286B0 SPI_PS_INPUT_CNTL_27 ++0x000286B4 SPI_PS_INPUT_CNTL_28 ++0x000286B8 SPI_PS_INPUT_CNTL_29 ++0x000286BC SPI_PS_INPUT_CNTL_30 ++0x000286C0 SPI_PS_INPUT_CNTL_31 ++0x000286C4 SPI_VS_OUT_CONFIG ++0x000286C8 SPI_THREAD_GROUPING ++0x000286CC SPI_PS_IN_CONTROL_0 ++0x000286D0 SPI_PS_IN_CONTROL_1 ++0x000286D4 SPI_INTERP_CONTROL_0 ++0x000286D8 SPI_INPUT_Z ++0x000286DC SPI_FOG_CNTL ++0x000286E0 SPI_BARYC_CNTL ++0x000286E4 SPI_PS_IN_CONTROL_2 ++0x000286E8 SPI_COMPUTE_INPUT_CNTL ++0x000286EC SPI_COMPUTE_NUM_THREAD_X ++0x000286F0 SPI_COMPUTE_NUM_THREAD_Y ++0x000286F4 SPI_COMPUTE_NUM_THREAD_Z ++0x000286F8 GDS_ADDR_SIZE ++0x00028780 CB_BLEND0_CONTROL ++0x00028784 CB_BLEND1_CONTROL ++0x00028788 CB_BLEND2_CONTROL ++0x0002878C CB_BLEND3_CONTROL ++0x00028790 CB_BLEND4_CONTROL ++0x00028794 CB_BLEND5_CONTROL ++0x00028798 CB_BLEND6_CONTROL ++0x0002879C CB_BLEND7_CONTROL ++0x000287CC CS_COPY_STATE ++0x000287D0 GFX_COPY_STATE ++0x000287D4 PA_CL_POINT_X_RAD ++0x000287D8 PA_CL_POINT_Y_RAD ++0x000287DC PA_CL_POINT_SIZE ++0x000287E0 PA_CL_POINT_CULL_RAD ++0x00028808 
CB_COLOR_CONTROL ++0x0002880C DB_SHADER_CONTROL ++0x00028810 PA_CL_CLIP_CNTL ++0x00028814 PA_SU_SC_MODE_CNTL ++0x00028818 PA_CL_VTE_CNTL ++0x0002881C PA_CL_VS_OUT_CNTL ++0x00028820 PA_CL_NANINF_CNTL ++0x00028824 PA_SU_LINE_STIPPLE_CNTL ++0x00028828 PA_SU_LINE_STIPPLE_SCALE ++0x0002882C PA_SU_PRIM_FILTER_CNTL ++0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1 ++0x00028844 SQ_PGM_RESOURCES_PS ++0x00028848 SQ_PGM_RESOURCES_2_PS ++0x0002884C SQ_PGM_EXPORTS_PS ++0x0002885C SQ_PGM_RESOURCES_VS ++0x00028860 SQ_PGM_RESOURCES_2_VS ++0x00028878 SQ_PGM_RESOURCES_GS ++0x0002887C SQ_PGM_RESOURCES_2_GS ++0x00028890 SQ_PGM_RESOURCES_ES ++0x00028894 SQ_PGM_RESOURCES_2_ES ++0x000288A8 SQ_PGM_RESOURCES_FS ++0x000288BC SQ_PGM_RESOURCES_HS ++0x000288C0 SQ_PGM_RESOURCES_2_HS ++0x000288D0 SQ_PGM_RESOURCES_LS ++0x000288D4 SQ_PGM_RESOURCES_2_LS ++0x000288E8 SQ_LDS_ALLOC ++0x000288EC SQ_LDS_ALLOC_PS ++0x000288F0 SQ_VTX_SEMANTIC_CLEAR ++0x00028A00 PA_SU_POINT_SIZE ++0x00028A04 PA_SU_POINT_MINMAX ++0x00028A08 PA_SU_LINE_CNTL ++0x00028A0C PA_SC_LINE_STIPPLE ++0x00028A10 VGT_OUTPUT_PATH_CNTL ++0x00028A14 VGT_HOS_CNTL ++0x00028A18 VGT_HOS_MAX_TESS_LEVEL ++0x00028A1C VGT_HOS_MIN_TESS_LEVEL ++0x00028A20 VGT_HOS_REUSE_DEPTH ++0x00028A24 VGT_GROUP_PRIM_TYPE ++0x00028A28 VGT_GROUP_FIRST_DECR ++0x00028A2C VGT_GROUP_DECR ++0x00028A30 VGT_GROUP_VECT_0_CNTL ++0x00028A34 VGT_GROUP_VECT_1_CNTL ++0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL ++0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL ++0x00028A40 VGT_GS_MODE ++0x00028A48 PA_SC_MODE_CNTL_0 ++0x00028A4C PA_SC_MODE_CNTL_1 ++0x00028A50 VGT_ENHANCE ++0x00028A54 VGT_GS_PER_ES ++0x00028A58 VGT_ES_PER_GS ++0x00028A5C VGT_GS_PER_VS ++0x00028A6C VGT_GS_OUT_PRIM_TYPE ++0x00028A84 VGT_PRIMITIVEID_EN ++0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN ++0x00028AA0 VGT_INSTANCE_STEP_RATE_0 ++0x00028AA4 VGT_INSTANCE_STEP_RATE_1 ++0x00028AB4 VGT_REUSE_OFF ++0x00028AB8 VGT_VTX_CNT_EN ++0x00028ABC DB_HTILE_SURFACE ++0x00028AC0 DB_SRESULTS_COMPARE_STATE0 ++0x00028AC4 DB_SRESULTS_COMPARE_STATE1 ++0x00028AC8 
DB_PRELOAD_CONTROL ++0x00028B38 VGT_GS_MAX_VERT_OUT ++0x00028B54 VGT_SHADER_STAGES_EN ++0x00028B58 VGT_LS_HS_CONFIG ++0x00028B5C VGT_LS_SIZE ++0x00028B60 VGT_HS_SIZE ++0x00028B64 VGT_LS_HS_ALLOC ++0x00028B68 VGT_HS_PATCH_CONST ++0x00028B6C VGT_TF_PARAM ++0x00028B70 DB_ALPHA_TO_MASK ++0x00028B74 VGT_DISPATCH_INITIATOR ++0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL ++0x00028B7C PA_SU_POLY_OFFSET_CLAMP ++0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE ++0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET ++0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE ++0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET ++0x00028B74 VGT_GS_INSTANCE_CNT ++0x00028C00 PA_SC_LINE_CNTL ++0x00028C08 PA_SU_VTX_CNTL ++0x00028C0C PA_CL_GB_VERT_CLIP_ADJ ++0x00028C10 PA_CL_GB_VERT_DISC_ADJ ++0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ ++0x00028C18 PA_CL_GB_HORZ_DISC_ADJ ++0x00028C1C PA_SC_AA_SAMPLE_LOCS_0 ++0x00028C20 PA_SC_AA_SAMPLE_LOCS_1 ++0x00028C24 PA_SC_AA_SAMPLE_LOCS_2 ++0x00028C28 PA_SC_AA_SAMPLE_LOCS_3 ++0x00028C2C PA_SC_AA_SAMPLE_LOCS_4 ++0x00028C30 PA_SC_AA_SAMPLE_LOCS_5 ++0x00028C34 PA_SC_AA_SAMPLE_LOCS_6 ++0x00028C38 PA_SC_AA_SAMPLE_LOCS_7 ++0x00028C3C PA_SC_AA_MASK ++0x00028C8C CB_COLOR0_CLEAR_WORD0 ++0x00028C90 CB_COLOR0_CLEAR_WORD1 ++0x00028C94 CB_COLOR0_CLEAR_WORD2 ++0x00028C98 CB_COLOR0_CLEAR_WORD3 ++0x00028CC8 CB_COLOR1_CLEAR_WORD0 ++0x00028CCC CB_COLOR1_CLEAR_WORD1 ++0x00028CD0 CB_COLOR1_CLEAR_WORD2 ++0x00028CD4 CB_COLOR1_CLEAR_WORD3 ++0x00028D04 CB_COLOR2_CLEAR_WORD0 ++0x00028D08 CB_COLOR2_CLEAR_WORD1 ++0x00028D0C CB_COLOR2_CLEAR_WORD2 ++0x00028D10 CB_COLOR2_CLEAR_WORD3 ++0x00028D40 CB_COLOR3_CLEAR_WORD0 ++0x00028D44 CB_COLOR3_CLEAR_WORD1 ++0x00028D48 CB_COLOR3_CLEAR_WORD2 ++0x00028D4C CB_COLOR3_CLEAR_WORD3 ++0x00028D7C CB_COLOR4_CLEAR_WORD0 ++0x00028D80 CB_COLOR4_CLEAR_WORD1 ++0x00028D84 CB_COLOR4_CLEAR_WORD2 ++0x00028D88 CB_COLOR4_CLEAR_WORD3 ++0x00028DB8 CB_COLOR5_CLEAR_WORD0 ++0x00028DBC CB_COLOR5_CLEAR_WORD1 ++0x00028DC0 CB_COLOR5_CLEAR_WORD2 ++0x00028DC4 CB_COLOR5_CLEAR_WORD3 ++0x00028DF4 CB_COLOR6_CLEAR_WORD0 
++0x00028DF8 CB_COLOR6_CLEAR_WORD1 ++0x00028DFC CB_COLOR6_CLEAR_WORD2 ++0x00028E00 CB_COLOR6_CLEAR_WORD3 ++0x00028E30 CB_COLOR7_CLEAR_WORD0 ++0x00028E34 CB_COLOR7_CLEAR_WORD1 ++0x00028E38 CB_COLOR7_CLEAR_WORD2 ++0x00028E3C CB_COLOR7_CLEAR_WORD3 ++0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0 ++0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1 ++0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2 ++0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3 ++0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4 ++0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5 ++0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6 ++0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7 ++0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8 ++0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9 ++0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10 ++0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11 ++0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12 ++0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13 ++0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14 ++0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15 ++0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0 ++0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1 ++0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2 ++0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3 ++0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4 ++0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5 ++0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6 ++0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7 ++0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8 ++0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9 ++0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10 ++0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11 ++0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12 ++0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13 ++0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14 ++0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15 ++0x0003CFF0 SQ_VTX_BASE_VTX_LOC ++0x0003CFF4 SQ_VTX_START_INST_LOC ++0x0003FF00 SQ_TEX_SAMPLER_CLEAR ++0x0003FF04 SQ_TEX_RESOURCE_CLEAR ++0x0003FF08 SQ_LOOP_BOOL_CLEAR +diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c +index 1a41cb2..9e4240b 100644 +--- a/drivers/gpu/drm/radeon/rs400.c ++++ b/drivers/gpu/drm/radeon/rs400.c +@@ -243,8 
+243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) + + void rs400_gpu_init(struct radeon_device *rdev) + { +- /* FIXME: HDP same place on rs400 ? */ +- r100_hdp_reset(rdev); + /* FIXME: is this correct ? */ + r420_pipes_init(rdev); + if (rs400_mc_wait_for_idle(rdev)) { +@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev) + /* setup MC before calling post tables */ + rs400_mc_program(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); +@@ -458,7 +456,6 @@ int rs400_suspend(struct radeon_device *rdev) + + void rs400_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); +@@ -497,7 +494,7 @@ int rs400_init(struct radeon_device *rdev) + return r; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), +@@ -509,8 +506,6 @@ int rs400_init(struct radeon_device *rdev) + + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* initialize memory controller */ + rs400_mc_init(rdev); + /* Fence driver */ +diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c +index a81bc7a..7bb4c3e 100644 +--- a/drivers/gpu/drm/radeon/rs600.c ++++ b/drivers/gpu/drm/radeon/rs600.c +@@ -46,6 +46,136 @@ + void rs600_gpu_init(struct radeon_device *rdev); + int rs600_mc_wait_for_idle(struct radeon_device *rdev); + ++void rs600_pm_misc(struct radeon_device *rdev) ++{ ++ int requested_index = rdev->pm.requested_power_state_index; ++ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; ++ struct radeon_voltage *voltage = &ps->clock_info[0].voltage; ++ u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl; ++ u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl; ++ ++ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { ++ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { ++ tmp = RREG32(voltage->gpio.reg); ++ if (voltage->active_high) ++ tmp |= voltage->gpio.mask; ++ else ++ tmp &= ~(voltage->gpio.mask); ++ WREG32(voltage->gpio.reg, tmp); ++ if (voltage->delay) ++ udelay(voltage->delay); ++ } else { ++ tmp = RREG32(voltage->gpio.reg); ++ if (voltage->active_high) ++ tmp &= ~voltage->gpio.mask; ++ else ++ tmp |= voltage->gpio.mask; ++ WREG32(voltage->gpio.reg, tmp); ++ if (voltage->delay) ++ udelay(voltage->delay); ++ } ++ } else if (voltage->type == VOLTAGE_VDDC) ++ radeon_atom_set_voltage(rdev, voltage->vddc_id); ++ ++ dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); ++ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); ++ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf); ++ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { ++ if (ps->misc & 
ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) { ++ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2); ++ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2); ++ } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) { ++ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4); ++ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4); ++ } ++ } else { ++ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1); ++ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1); ++ } ++ WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length); ++ ++ dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL); ++ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { ++ dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP; ++ if (voltage->delay) { ++ dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC; ++ dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay); ++ } else ++ dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC; ++ } else ++ dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP; ++ WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl); ++ ++ hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL); ++ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) ++ hdp_dyn_cntl &= ~HDP_FORCEON; ++ else ++ hdp_dyn_cntl |= HDP_FORCEON; ++ WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl); ++#if 0 ++ /* mc_host_dyn seems to cause hangs from time to time */ ++ mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL); ++ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN) ++ mc_host_dyn_cntl &= ~MC_HOST_FORCEON; ++ else ++ mc_host_dyn_cntl |= MC_HOST_FORCEON; ++ WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl); ++#endif ++ dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL); ++ if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN) ++ dyn_backbias_cntl |= IO_CG_BACKBIAS_EN; ++ else ++ dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN; ++ WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl); ++ ++ /* set pcie lanes */ ++ if ((rdev->flags & RADEON_IS_PCIE) && ++ !(rdev->flags & RADEON_IS_IGP) && ++ rdev->asic->set_pcie_lanes && ++ (ps->pcie_lanes != ++ 
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { ++ radeon_set_pcie_lanes(rdev, ++ ps->pcie_lanes); ++ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); ++ } ++} ++ ++void rs600_pm_prepare(struct radeon_device *rdev) ++{ ++ struct drm_device *ddev = rdev->ddev; ++ struct drm_crtc *crtc; ++ struct radeon_crtc *radeon_crtc; ++ u32 tmp; ++ ++ /* disable any active CRTCs */ ++ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { ++ radeon_crtc = to_radeon_crtc(crtc); ++ if (radeon_crtc->enabled) { ++ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); ++ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; ++ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); ++ } ++ } ++} ++ ++void rs600_pm_finish(struct radeon_device *rdev) ++{ ++ struct drm_device *ddev = rdev->ddev; ++ struct drm_crtc *crtc; ++ struct radeon_crtc *radeon_crtc; ++ u32 tmp; ++ ++ /* enable any active CRTCs */ ++ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { ++ radeon_crtc = to_radeon_crtc(crtc); ++ if (radeon_crtc->enabled) { ++ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); ++ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; ++ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); ++ } ++ } ++} ++ + /* hpd for digital panel detect/disconnect */ + bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) + { +@@ -147,6 +277,78 @@ void rs600_hpd_fini(struct radeon_device *rdev) + } + } + ++void rs600_bm_disable(struct radeon_device *rdev) ++{ ++ u32 tmp; ++ ++ /* disable bus mastering */ ++ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); ++ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); ++ mdelay(1); ++} ++ ++int rs600_asic_reset(struct radeon_device *rdev) ++{ ++ u32 status, tmp; ++ ++ struct rv515_mc_save save; ++ ++ /* Stops all mc clients */ ++ rv515_mc_stop(rdev, &save); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ if (!G_000E40_GUI_ACTIVE(status)) { ++ return 0; ++ } ++ status = 
RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* stop CP */ ++ WREG32(RADEON_CP_CSQ_CNTL, 0); ++ tmp = RREG32(RADEON_CP_RB_CNTL); ++ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); ++ WREG32(RADEON_CP_RB_RPTR_WR, 0); ++ WREG32(RADEON_CP_RB_WPTR, 0); ++ WREG32(RADEON_CP_RB_CNTL, tmp); ++ pci_save_state(rdev->pdev); ++ /* disable bus mastering */ ++ rs600_bm_disable(rdev); ++ /* reset GA+VAP */ ++ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | ++ S_0000F0_SOFT_RESET_GA(1)); ++ RREG32(R_0000F0_RBBM_SOFT_RESET); ++ mdelay(500); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); ++ mdelay(1); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* reset CP */ ++ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); ++ RREG32(R_0000F0_RBBM_SOFT_RESET); ++ mdelay(500); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); ++ mdelay(1); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* reset MC */ ++ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1)); ++ RREG32(R_0000F0_RBBM_SOFT_RESET); ++ mdelay(500); ++ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); ++ mdelay(1); ++ status = RREG32(R_000E40_RBBM_STATUS); ++ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); ++ /* restore PCI & busmastering */ ++ pci_restore_state(rdev->pdev); ++ /* Check if GPU is idle */ ++ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { ++ dev_err(rdev->dev, "failed to reset GPU\n"); ++ rdev->gpu_lockup = true; ++ return -1; ++ } ++ rv515_mc_resume(rdev, &save); ++ dev_info(rdev->dev, "GPU reset succeed\n"); ++ return 0; ++} ++ + /* + * GART. 
+ */ +@@ -310,6 +512,9 @@ int rs600_irq_set(struct radeon_device *rdev) + if (rdev->irq.sw_int) { + tmp |= S_000040_SW_INT_EN(1); + } ++ if (rdev->irq.gui_idle) { ++ tmp |= S_000040_GUI_IDLE(1); ++ } + if (rdev->irq.crtc_vblank_int[0]) { + mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); + } +@@ -332,9 +537,15 @@ int rs600_irq_set(struct radeon_device *rdev) + static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) + { + uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); +- uint32_t irq_mask = ~C_000044_SW_INT; ++ uint32_t irq_mask = S_000044_SW_INT(1); + u32 tmp; + ++ /* the interrupt works, but the status bit is permanently asserted */ ++ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { ++ if (!rdev->irq.gui_idle_acked) ++ irq_mask |= S_000044_GUI_IDLE_STAT(1); ++ } ++ + if (G_000044_DISPLAY_INT_STAT(irqs)) { + *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { +@@ -382,6 +593,9 @@ int rs600_irq_process(struct radeon_device *rdev) + uint32_t r500_disp_int; + bool queue_hotplug = false; + ++ /* reset gui idle ack. the status bit is broken */ ++ rdev->irq.gui_idle_acked = false; ++ + status = rs600_irq_ack(rdev, &r500_disp_int); + if (!status && !r500_disp_int) { + return IRQ_NONE; +@@ -390,6 +604,12 @@ int rs600_irq_process(struct radeon_device *rdev) + /* SW interrupt */ + if (G_000044_SW_INT(status)) + radeon_fence_process(rdev); ++ /* GUI idle */ ++ if (G_000040_GUI_IDLE(status)) { ++ rdev->irq.gui_idle_acked = true; ++ rdev->pm.gui_idle = true; ++ wake_up(&rdev->irq.idle_queue); ++ } + /* Vertical blank interrupts */ + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { + drm_handle_vblank(rdev->ddev, 0); +@@ -411,6 +631,8 @@ int rs600_irq_process(struct radeon_device *rdev) + } + status = rs600_irq_ack(rdev, &r500_disp_int); + } ++ /* reset gui idle ack. 
the status bit is broken */ ++ rdev->irq.gui_idle_acked = false; + if (queue_hotplug) + queue_work(rdev->wq, &rdev->hotplug_work); + if (rdev->msi_enabled) { +@@ -454,7 +676,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev) + + void rs600_gpu_init(struct radeon_device *rdev) + { +- r100_hdp_reset(rdev); + r420_pipes_init(rdev); + /* Wait for mc idle */ + if (rs600_mc_wait_for_idle(rdev)) +@@ -601,7 +822,7 @@ int rs600_resume(struct radeon_device *rdev) + /* Resume clock before doing reset */ + rv515_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); +@@ -626,7 +847,6 @@ int rs600_suspend(struct radeon_device *rdev) + + void rs600_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); +@@ -664,7 +884,7 @@ int rs600_init(struct radeon_device *rdev) + return -EINVAL; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), +@@ -676,8 +896,6 @@ int rs600_init(struct radeon_device *rdev) + + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* initialize memory controller */ + rs600_mc_init(rdev); + rs600_debugfs(rdev); +diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h +index e52d269..a27c13a 100644 +--- a/drivers/gpu/drm/radeon/rs600d.h ++++ b/drivers/gpu/drm/radeon/rs600d.h +@@ -178,6 +178,52 @@ + #define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) + #define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) + #define C_000074_MC_IND_DATA 0x00000000 ++#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 ++#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) ++#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) ++#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE ++#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) ++#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) ++#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD ++#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) ++#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) ++#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB ++#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) ++#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) ++#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 ++#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) ++#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) ++#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF ++#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) ++#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) ++#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF ++#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) ++#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) ++#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF ++#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) ++#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) ++#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F 
++#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) ++#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) ++#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF ++#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) ++#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) ++#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF ++#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) ++#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) ++#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF ++#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) ++#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) ++#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF ++#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) ++#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) ++#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF ++#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) ++#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) ++#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF ++#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) ++#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) ++#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF + #define R_000134_HDP_FB_LOCATION 0x000134 + #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) + #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) +@@ -588,4 +634,38 @@ + #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) + #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF + ++/* PLL regs */ ++#define GENERAL_PWRMGT 0x8 ++#define GLOBAL_PWRMGT_EN (1 << 0) ++#define MOBILE_SU (1 << 2) ++#define DYN_PWRMGT_SCLK_LENGTH 0xc ++#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0) ++#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4) ++#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8) ++#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12) ++#define POWER_D1_SCLK_HILEN(x) ((x) << 16) ++#define POWER_D1_SCLK_LOLEN(x) ((x) << 20) ++#define STATIC_SCREEN_HILEN(x) ((x) << 24) ++#define STATIC_SCREEN_LOLEN(x) ((x) << 28) ++#define DYN_SCLK_VOL_CNTL 0xe ++#define 
IO_CG_VOLTAGE_DROP (1 << 0) ++#define VOLTAGE_DROP_SYNC (1 << 2) ++#define VOLTAGE_DELAY_SEL(x) ((x) << 3) ++#define HDP_DYN_CNTL 0x10 ++#define HDP_FORCEON (1 << 0) ++#define MC_HOST_DYN_CNTL 0x1e ++#define MC_HOST_FORCEON (1 << 0) ++#define DYN_BACKBIAS_CNTL 0x29 ++#define IO_CG_BACKBIAS_EN (1 << 0) ++ ++/* mmreg */ ++#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0 ++#define PWRDN_WAIT_BUSY_OFF (1 << 0) ++#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4) ++#define PWRDN_WAIT_PPLL_OFF (1 << 8) ++#define PWRUP_WAIT_PPLL_ON (1 << 12) ++#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16) ++#define PM_ASSERT_RESET (1 << 20) ++#define PM_PWRDN_PPLL (1 << 24) ++ + #endif +diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c +index bbf3da7..bcc3319 100644 +--- a/drivers/gpu/drm/radeon/rs690.c ++++ b/drivers/gpu/drm/radeon/rs690.c +@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev) + + static void rs690_gpu_init(struct radeon_device *rdev) + { +- /* FIXME: HDP same place on rs690 ? */ +- r100_hdp_reset(rdev); + /* FIXME: is this correct ? 
*/ + r420_pipes_init(rdev); + if (rs690_mc_wait_for_idle(rdev)) { +@@ -78,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev) + /* Get various system informations from bios */ + switch (crev) { + case 1: +- tmp.full = rfixed_const(100); +- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock); +- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); +- rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); +- rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock)); +- rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth); ++ tmp.full = dfixed_const(100); ++ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); ++ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); ++ rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); ++ rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); ++ rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); + break; + case 2: +- tmp.full = rfixed_const(100); +- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock); +- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); +- rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock); +- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); +- rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq); +- rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); +- rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); ++ tmp.full = dfixed_const(100); ++ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); ++ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); ++ rdev->pm.igp_system_mclk.full = 
dfixed_const(info->info_v2.ulBootUpUMAClock); ++ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); ++ rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); ++ rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); ++ rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); + break; + default: +- tmp.full = rfixed_const(100); ++ tmp.full = dfixed_const(100); + /* We assume the slower possible clock ie worst case */ + /* DDR 333Mhz */ +- rdev->pm.igp_sideport_mclk.full = rfixed_const(333); ++ rdev->pm.igp_sideport_mclk.full = dfixed_const(333); + /* FIXME: system clock ? */ +- rdev->pm.igp_system_mclk.full = rfixed_const(100); +- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); +- rdev->pm.igp_ht_link_clk.full = rfixed_const(200); +- rdev->pm.igp_ht_link_width.full = rfixed_const(8); ++ rdev->pm.igp_system_mclk.full = dfixed_const(100); ++ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); ++ rdev->pm.igp_ht_link_clk.full = dfixed_const(200); ++ rdev->pm.igp_ht_link_width.full = dfixed_const(8); + DRM_ERROR("No integrated system info for your GPU, using safe default\n"); + break; + } + } else { +- tmp.full = rfixed_const(100); ++ tmp.full = dfixed_const(100); + /* We assume the slower possible clock ie worst case */ + /* DDR 333Mhz */ +- rdev->pm.igp_sideport_mclk.full = rfixed_const(333); ++ rdev->pm.igp_sideport_mclk.full = dfixed_const(333); + /* FIXME: system clock ? 
*/ +- rdev->pm.igp_system_mclk.full = rfixed_const(100); +- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); +- rdev->pm.igp_ht_link_clk.full = rfixed_const(200); +- rdev->pm.igp_ht_link_width.full = rfixed_const(8); ++ rdev->pm.igp_system_mclk.full = dfixed_const(100); ++ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); ++ rdev->pm.igp_ht_link_clk.full = dfixed_const(200); ++ rdev->pm.igp_ht_link_width.full = dfixed_const(8); + DRM_ERROR("No integrated system info for your GPU, using safe default\n"); + } + /* Compute various bandwidth */ + /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ +- tmp.full = rfixed_const(4); +- rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); ++ tmp.full = dfixed_const(4); ++ rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp); + /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 + * = ht_clk * ht_width / 5 + */ +- tmp.full = rfixed_const(5); +- rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, ++ tmp.full = dfixed_const(5); ++ rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk, + rdev->pm.igp_ht_link_width); +- rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); ++ rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp); + if (tmp.full < rdev->pm.max_bandwidth.full) { + /* HT link is a limiting factor */ + rdev->pm.max_bandwidth.full = tmp.full; +@@ -138,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev) + /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 + * = (sideport_clk * 14) / 10 + */ +- tmp.full = rfixed_const(14); +- rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); +- tmp.full = rfixed_const(10); +- rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); ++ tmp.full = dfixed_const(14); ++ rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp); ++ tmp.full = 
dfixed_const(10); ++ rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); + } + + void rs690_mc_init(struct radeon_device *rdev) +@@ -241,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + return; + } + +- if (crtc->vsc.full > rfixed_const(2)) +- wm->num_line_pair.full = rfixed_const(2); ++ if (crtc->vsc.full > dfixed_const(2)) ++ wm->num_line_pair.full = dfixed_const(2); + else +- wm->num_line_pair.full = rfixed_const(1); +- +- b.full = rfixed_const(mode->crtc_hdisplay); +- c.full = rfixed_const(256); +- a.full = rfixed_div(b, c); +- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); +- request_fifo_depth.full = rfixed_ceil(request_fifo_depth); +- if (a.full < rfixed_const(4)) { ++ wm->num_line_pair.full = dfixed_const(1); ++ ++ b.full = dfixed_const(mode->crtc_hdisplay); ++ c.full = dfixed_const(256); ++ a.full = dfixed_div(b, c); ++ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); ++ request_fifo_depth.full = dfixed_ceil(request_fifo_depth); ++ if (a.full < dfixed_const(4)) { + wm->lb_request_fifo_depth = 4; + } else { +- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); ++ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); + } + + /* Determine consumption rate +@@ -263,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + * vsc = vertical scaling ratio, defined as source/destination + * hsc = horizontal scaling ration, defined as source/destination + */ +- a.full = rfixed_const(mode->clock); +- b.full = rfixed_const(1000); +- a.full = rfixed_div(a, b); +- pclk.full = rfixed_div(b, a); ++ a.full = dfixed_const(mode->clock); ++ b.full = dfixed_const(1000); ++ a.full = dfixed_div(a, b); ++ pclk.full = dfixed_div(b, a); + if (crtc->rmx_type != RMX_OFF) { +- b.full = rfixed_const(2); ++ b.full = dfixed_const(2); + if (crtc->vsc.full > b.full) + b.full = crtc->vsc.full; +- b.full = rfixed_mul(b, crtc->hsc); +- c.full = rfixed_const(2); +- b.full = 
rfixed_div(b, c); +- consumption_time.full = rfixed_div(pclk, b); ++ b.full = dfixed_mul(b, crtc->hsc); ++ c.full = dfixed_const(2); ++ b.full = dfixed_div(b, c); ++ consumption_time.full = dfixed_div(pclk, b); + } else { + consumption_time.full = pclk.full; + } +- a.full = rfixed_const(1); +- wm->consumption_rate.full = rfixed_div(a, consumption_time); ++ a.full = dfixed_const(1); ++ wm->consumption_rate.full = dfixed_div(a, consumption_time); + + + /* Determine line time +@@ -287,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + * LineTime = total number of horizontal pixels + * pclk = pixel clock period(ns) + */ +- a.full = rfixed_const(crtc->base.mode.crtc_htotal); +- line_time.full = rfixed_mul(a, pclk); ++ a.full = dfixed_const(crtc->base.mode.crtc_htotal); ++ line_time.full = dfixed_mul(a, pclk); + + /* Determine active time + * ActiveTime = time of active region of display within one line, + * hactive = total number of horizontal active pixels + * htotal = total number of horizontal pixels + */ +- a.full = rfixed_const(crtc->base.mode.crtc_htotal); +- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); +- wm->active_time.full = rfixed_mul(line_time, b); +- wm->active_time.full = rfixed_div(wm->active_time, a); ++ a.full = dfixed_const(crtc->base.mode.crtc_htotal); ++ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); ++ wm->active_time.full = dfixed_mul(line_time, b); ++ wm->active_time.full = dfixed_div(wm->active_time, a); + + /* Maximun bandwidth is the minimun bandwidth of all component */ + rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; +@@ -306,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && + rdev->pm.sideport_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; +- read_delay_latency.full = rfixed_const(370 * 800 * 1000); +- read_delay_latency.full = rfixed_div(read_delay_latency, ++ 
read_delay_latency.full = dfixed_const(370 * 800 * 1000); ++ read_delay_latency.full = dfixed_div(read_delay_latency, + rdev->pm.igp_sideport_mclk); + } else { + if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && +@@ -316,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && + rdev->pm.ht_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; +- read_delay_latency.full = rfixed_const(5000); ++ read_delay_latency.full = dfixed_const(5000); + } + + /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ +- a.full = rfixed_const(16); +- rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); +- a.full = rfixed_const(1000); +- rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); ++ a.full = dfixed_const(16); ++ rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); ++ a.full = dfixed_const(1000); ++ rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); + /* Determine chunk time + * ChunkTime = the time it takes the DCP to send one chunk of data + * to the LB which consists of pipeline delay and inter chunk gap + * sclk = system clock(ns) + */ +- a.full = rfixed_const(256 * 13); +- chunk_time.full = rfixed_mul(rdev->pm.sclk, a); +- a.full = rfixed_const(10); +- chunk_time.full = rfixed_div(chunk_time, a); ++ a.full = dfixed_const(256 * 13); ++ chunk_time.full = dfixed_mul(rdev->pm.sclk, a); ++ a.full = dfixed_const(10); ++ chunk_time.full = dfixed_div(chunk_time, a); + + /* Determine the worst case latency + * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) +@@ -342,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + * ChunkTime = time it takes the DCP to send one chunk of data to the LB + * which consists of pipeline delay and inter chunk gap + */ +- if (rfixed_trunc(wm->num_line_pair) > 1) { +- a.full = rfixed_const(3); +- wm->worst_case_latency.full = rfixed_mul(a, chunk_time); ++ if 
(dfixed_trunc(wm->num_line_pair) > 1) { ++ a.full = dfixed_const(3); ++ wm->worst_case_latency.full = dfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } else { +- a.full = rfixed_const(2); +- wm->worst_case_latency.full = rfixed_mul(a, chunk_time); ++ a.full = dfixed_const(2); ++ wm->worst_case_latency.full = dfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } + +@@ -362,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + * of data to the LB which consists of + * pipeline delay and inter chunk gap + */ +- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { ++ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { + tolerable_latency.full = line_time.full; + } else { +- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); ++ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); + tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; +- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); ++ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); + tolerable_latency.full = line_time.full - tolerable_latency.full; + } + /* We assume worst case 32bits (4 bytes) */ +- wm->dbpp.full = rfixed_const(4 * 8); ++ wm->dbpp.full = dfixed_const(4 * 8); + + /* Determine the maximum priority mark + * width = viewport width in pixels + */ +- a.full = rfixed_const(16); +- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); +- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); +- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); ++ a.full = dfixed_const(16); ++ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); ++ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); ++ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); + + /* Determine estimated width */ + 
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; +- estimated_width.full = rfixed_div(estimated_width, consumption_time); +- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { +- wm->priority_mark.full = rfixed_const(10); ++ estimated_width.full = dfixed_div(estimated_width, consumption_time); ++ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { ++ wm->priority_mark.full = dfixed_const(10); + } else { +- a.full = rfixed_const(16); +- wm->priority_mark.full = rfixed_div(estimated_width, a); +- wm->priority_mark.full = rfixed_ceil(wm->priority_mark); ++ a.full = dfixed_const(16); ++ wm->priority_mark.full = dfixed_div(estimated_width, a); ++ wm->priority_mark.full = dfixed_ceil(wm->priority_mark); + wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; + } + } +@@ -441,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev) + WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); + + if (mode0 && mode1) { +- if (rfixed_trunc(wm0.dbpp) > 64) +- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); ++ if (dfixed_trunc(wm0.dbpp) > 64) ++ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; +- if (rfixed_trunc(wm1.dbpp) > 64) +- b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); ++ if (dfixed_trunc(wm1.dbpp) > 64) ++ b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); + else + b.full = wm1.num_line_pair.full; + a.full += b.full; +- fill_rate.full = rfixed_div(wm0.sclk, a); ++ fill_rate.full = dfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; +- b.full = rfixed_mul(b, wm0.active_time); +- a.full = rfixed_mul(wm0.worst_case_latency, ++ b.full = dfixed_mul(b, wm0.active_time); ++ a.full = dfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + a.full = a.full + b.full; +- b.full = rfixed_const(16 * 1000); +- priority_mark02.full = rfixed_div(a, b); ++ b.full = 
dfixed_const(16 * 1000); ++ priority_mark02.full = dfixed_div(a, b); + } else { +- a.full = rfixed_mul(wm0.worst_case_latency, ++ a.full = dfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); +- b.full = rfixed_const(16 * 1000); +- priority_mark02.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark02.full = dfixed_div(a, b); + } + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; +- b.full = rfixed_mul(b, wm1.active_time); +- a.full = rfixed_mul(wm1.worst_case_latency, ++ b.full = dfixed_mul(b, wm1.active_time); ++ a.full = dfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + a.full = a.full + b.full; +- b.full = rfixed_const(16 * 1000); +- priority_mark12.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark12.full = dfixed_div(a, b); + } else { +- a.full = rfixed_mul(wm1.worst_case_latency, ++ a.full = dfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); +- b.full = rfixed_const(16 * 1000); +- priority_mark12.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark12.full = dfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; +- if (rfixed_trunc(priority_mark02) < 0) ++ if (dfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; +- if (rfixed_trunc(priority_mark12) < 0) ++ if (dfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; +- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); +- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); ++ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); ++ 
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) { + d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); + d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); +@@ -502,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev) + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); + } else if (mode0) { +- if (rfixed_trunc(wm0.dbpp) > 64) +- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); ++ if (dfixed_trunc(wm0.dbpp) > 64) ++ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; +- fill_rate.full = rfixed_div(wm0.sclk, a); ++ fill_rate.full = dfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; +- b.full = rfixed_mul(b, wm0.active_time); +- a.full = rfixed_mul(wm0.worst_case_latency, ++ b.full = dfixed_mul(b, wm0.active_time); ++ a.full = dfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + a.full = a.full + b.full; +- b.full = rfixed_const(16 * 1000); +- priority_mark02.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark02.full = dfixed_div(a, b); + } else { +- a.full = rfixed_mul(wm0.worst_case_latency, ++ a.full = dfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); +- b.full = rfixed_const(16 * 1000); +- priority_mark02.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark02.full = dfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; +- if (rfixed_trunc(priority_mark02) < 0) ++ if (dfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; +- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); ++ d1mode_priority_a_cnt = 
dfixed_trunc(priority_mark02); + if (rdev->disp_priority == 2) + d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); +@@ -537,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev) + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, + S_006D4C_D2MODE_PRIORITY_B_OFF(1)); + } else { +- if (rfixed_trunc(wm1.dbpp) > 64) +- a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); ++ if (dfixed_trunc(wm1.dbpp) > 64) ++ a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); + else + a.full = wm1.num_line_pair.full; +- fill_rate.full = rfixed_div(wm1.sclk, a); ++ fill_rate.full = dfixed_div(wm1.sclk, a); + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; +- b.full = rfixed_mul(b, wm1.active_time); +- a.full = rfixed_mul(wm1.worst_case_latency, ++ b.full = dfixed_mul(b, wm1.active_time); ++ a.full = dfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + a.full = a.full + b.full; +- b.full = rfixed_const(16 * 1000); +- priority_mark12.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark12.full = dfixed_div(a, b); + } else { +- a.full = rfixed_mul(wm1.worst_case_latency, ++ a.full = dfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); +- b.full = rfixed_const(16 * 1000); +- priority_mark12.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark12.full = dfixed_div(a, b); + } + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; +- if (rfixed_trunc(priority_mark12) < 0) ++ if (dfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; +- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); ++ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) + d2mode_priority_a_cnt |= 
S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, +@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev) + /* Resume clock before doing reset */ + rv515_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); +@@ -678,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev) + + void rs690_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); +@@ -717,7 +714,7 @@ int rs690_init(struct radeon_device *rdev) + return -EINVAL; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), +@@ -729,8 +726,6 @@ int rs690_init(struct radeon_device *rdev) + + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* initialize memory controller */ + rs690_mc_init(rdev); + rv515_debugfs(rdev); +diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c +index 9035121..7d9a7b0 100644 +--- a/drivers/gpu/drm/radeon/rv515.c ++++ b/drivers/gpu/drm/radeon/rv515.c +@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev) + { + unsigned pipe_select_current, gb_pipe_select, tmp; + +- r100_hdp_reset(rdev); +- r100_rb2d_reset(rdev); +- + if (r100_gui_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait GUI idle while " + "reseting GPU. 
Bad things might happen.\n"); + } +- + rv515_vga_render_disable(rdev); +- + r420_pipes_init(rdev); + gb_pipe_select = RREG32(0x402C); + tmp = RREG32(0x170C); +@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev) + } + } + +-int rv515_ga_reset(struct radeon_device *rdev) +-{ +- uint32_t tmp; +- bool reinit_cp; +- int i; +- +- reinit_cp = rdev->cp.ready; +- rdev->cp.ready = false; +- for (i = 0; i < rdev->usec_timeout; i++) { +- WREG32(CP_CSQ_MODE, 0); +- WREG32(CP_CSQ_CNTL, 0); +- WREG32(RBBM_SOFT_RESET, 0x32005); +- (void)RREG32(RBBM_SOFT_RESET); +- udelay(200); +- WREG32(RBBM_SOFT_RESET, 0); +- /* Wait to prevent race in RBBM_STATUS */ +- mdelay(1); +- tmp = RREG32(RBBM_STATUS); +- if (tmp & ((1 << 20) | (1 << 26))) { +- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); +- /* GA still busy soft reset it */ +- WREG32(0x429C, 0x200); +- WREG32(VAP_PVS_STATE_FLUSH_REG, 0); +- WREG32(0x43E0, 0); +- WREG32(0x43E4, 0); +- WREG32(0x24AC, 0); +- } +- /* Wait to prevent race in RBBM_STATUS */ +- mdelay(1); +- tmp = RREG32(RBBM_STATUS); +- if (!(tmp & ((1 << 20) | (1 << 26)))) { +- break; +- } +- } +- for (i = 0; i < rdev->usec_timeout; i++) { +- tmp = RREG32(RBBM_STATUS); +- if (!(tmp & ((1 << 20) | (1 << 26)))) { +- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", +- tmp); +- DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C)); +- DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0)); +- DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724)); +- if (reinit_cp) { +- return r100_cp_init(rdev, rdev->cp.ring_size); +- } +- return 0; +- } +- DRM_UDELAY(1); +- } +- tmp = RREG32(RBBM_STATUS); +- DRM_ERROR("Failed to reset GA ! 
(RBBM_STATUS=0x%08X)\n", tmp); +- return -1; +-} +- +-int rv515_gpu_reset(struct radeon_device *rdev) +-{ +- uint32_t status; +- +- /* reset order likely matter */ +- status = RREG32(RBBM_STATUS); +- /* reset HDP */ +- r100_hdp_reset(rdev); +- /* reset rb2d */ +- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { +- r100_rb2d_reset(rdev); +- } +- /* reset GA */ +- if (status & ((1 << 20) | (1 << 26))) { +- rv515_ga_reset(rdev); +- } +- /* reset CP */ +- status = RREG32(RBBM_STATUS); +- if (status & (1 << 16)) { +- r100_cp_reset(rdev); +- } +- /* Check if GPU is idle */ +- status = RREG32(RBBM_STATUS); +- if (status & (1 << 31)) { +- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); +- return -1; +- } +- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); +- return 0; +-} +- + static void rv515_vram_get_type(struct radeon_device *rdev) + { + uint32_t tmp; +@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data) + + tmp = RREG32(0x2140); + seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp); +- radeon_gpu_reset(rdev); ++ radeon_asic_reset(rdev); + tmp = RREG32(0x425C); + seq_printf(m, "GA_IDLE 0x%08x\n", tmp); + return 0; +@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev) + /* Resume clock before doing reset */ + rv515_clock_startup(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); +@@ -535,7 +445,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev) + + void rv515_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); +@@ -573,7 +482,7 @@ int rv515_init(struct radeon_device *rdev) + return -EINVAL; + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ +- if (radeon_gpu_reset(rdev)) { ++ if (radeon_asic_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), +@@ -584,8 +493,6 @@ int rv515_init(struct radeon_device *rdev) + return -EINVAL; + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); +@@ -885,20 +792,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, + return; + } + +- if (crtc->vsc.full > rfixed_const(2)) +- wm->num_line_pair.full = rfixed_const(2); ++ if (crtc->vsc.full > dfixed_const(2)) ++ wm->num_line_pair.full = dfixed_const(2); + else +- wm->num_line_pair.full = rfixed_const(1); +- +- b.full = rfixed_const(mode->crtc_hdisplay); +- c.full = rfixed_const(256); +- a.full = rfixed_div(b, c); +- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); +- request_fifo_depth.full = rfixed_ceil(request_fifo_depth); +- if (a.full < rfixed_const(4)) { ++ wm->num_line_pair.full = dfixed_const(1); ++ ++ b.full = dfixed_const(mode->crtc_hdisplay); ++ c.full = dfixed_const(256); ++ a.full = dfixed_div(b, c); ++ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); ++ request_fifo_depth.full = dfixed_ceil(request_fifo_depth); ++ if (a.full < dfixed_const(4)) { + wm->lb_request_fifo_depth = 4; + } else { +- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); ++ wm->lb_request_fifo_depth = 
dfixed_trunc(request_fifo_depth); + } + + /* Determine consumption rate +@@ -907,23 +814,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, + * vsc = vertical scaling ratio, defined as source/destination + * hsc = horizontal scaling ration, defined as source/destination + */ +- a.full = rfixed_const(mode->clock); +- b.full = rfixed_const(1000); +- a.full = rfixed_div(a, b); +- pclk.full = rfixed_div(b, a); ++ a.full = dfixed_const(mode->clock); ++ b.full = dfixed_const(1000); ++ a.full = dfixed_div(a, b); ++ pclk.full = dfixed_div(b, a); + if (crtc->rmx_type != RMX_OFF) { +- b.full = rfixed_const(2); ++ b.full = dfixed_const(2); + if (crtc->vsc.full > b.full) + b.full = crtc->vsc.full; +- b.full = rfixed_mul(b, crtc->hsc); +- c.full = rfixed_const(2); +- b.full = rfixed_div(b, c); +- consumption_time.full = rfixed_div(pclk, b); ++ b.full = dfixed_mul(b, crtc->hsc); ++ c.full = dfixed_const(2); ++ b.full = dfixed_div(b, c); ++ consumption_time.full = dfixed_div(pclk, b); + } else { + consumption_time.full = pclk.full; + } +- a.full = rfixed_const(1); +- wm->consumption_rate.full = rfixed_div(a, consumption_time); ++ a.full = dfixed_const(1); ++ wm->consumption_rate.full = dfixed_div(a, consumption_time); + + + /* Determine line time +@@ -931,27 +838,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, + * LineTime = total number of horizontal pixels + * pclk = pixel clock period(ns) + */ +- a.full = rfixed_const(crtc->base.mode.crtc_htotal); +- line_time.full = rfixed_mul(a, pclk); ++ a.full = dfixed_const(crtc->base.mode.crtc_htotal); ++ line_time.full = dfixed_mul(a, pclk); + + /* Determine active time + * ActiveTime = time of active region of display within one line, + * hactive = total number of horizontal active pixels + * htotal = total number of horizontal pixels + */ +- a.full = rfixed_const(crtc->base.mode.crtc_htotal); +- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); +- wm->active_time.full = rfixed_mul(line_time, 
b); +- wm->active_time.full = rfixed_div(wm->active_time, a); ++ a.full = dfixed_const(crtc->base.mode.crtc_htotal); ++ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); ++ wm->active_time.full = dfixed_mul(line_time, b); ++ wm->active_time.full = dfixed_div(wm->active_time, a); + + /* Determine chunk time + * ChunkTime = the time it takes the DCP to send one chunk of data + * to the LB which consists of pipeline delay and inter chunk gap + * sclk = system clock(Mhz) + */ +- a.full = rfixed_const(600 * 1000); +- chunk_time.full = rfixed_div(a, rdev->pm.sclk); +- read_delay_latency.full = rfixed_const(1000); ++ a.full = dfixed_const(600 * 1000); ++ chunk_time.full = dfixed_div(a, rdev->pm.sclk); ++ read_delay_latency.full = dfixed_const(1000); + + /* Determine the worst case latency + * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) +@@ -961,9 +868,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, + * ChunkTime = time it takes the DCP to send one chunk of data to the LB + * which consists of pipeline delay and inter chunk gap + */ +- if (rfixed_trunc(wm->num_line_pair) > 1) { +- a.full = rfixed_const(3); +- wm->worst_case_latency.full = rfixed_mul(a, chunk_time); ++ if (dfixed_trunc(wm->num_line_pair) > 1) { ++ a.full = dfixed_const(3); ++ wm->worst_case_latency.full = dfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } else { + wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; +@@ -979,34 +886,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, + * of data to the LB which consists of + * pipeline delay and inter chunk gap + */ +- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { ++ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { + tolerable_latency.full = line_time.full; + } else { +- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); ++ tolerable_latency.full = 
dfixed_const(wm->lb_request_fifo_depth - 2); + tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; +- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); ++ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); + tolerable_latency.full = line_time.full - tolerable_latency.full; + } + /* We assume worst case 32bits (4 bytes) */ +- wm->dbpp.full = rfixed_const(2 * 16); ++ wm->dbpp.full = dfixed_const(2 * 16); + + /* Determine the maximum priority mark + * width = viewport width in pixels + */ +- a.full = rfixed_const(16); +- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); +- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); +- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); ++ a.full = dfixed_const(16); ++ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); ++ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); ++ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); + + /* Determine estimated width */ + estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; +- estimated_width.full = rfixed_div(estimated_width, consumption_time); +- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { ++ estimated_width.full = dfixed_div(estimated_width, consumption_time); ++ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { + wm->priority_mark.full = wm->priority_mark_max.full; + } else { +- a.full = rfixed_const(16); +- wm->priority_mark.full = rfixed_div(estimated_width, a); +- wm->priority_mark.full = rfixed_ceil(wm->priority_mark); ++ a.full = dfixed_const(16); ++ wm->priority_mark.full = dfixed_div(estimated_width, a); ++ wm->priority_mark.full = dfixed_ceil(wm->priority_mark); + wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; + } + } +@@ -1035,58 +942,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) + 
WREG32(LB_MAX_REQ_OUTSTANDING, tmp); + + if (mode0 && mode1) { +- if (rfixed_trunc(wm0.dbpp) > 64) +- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); ++ if (dfixed_trunc(wm0.dbpp) > 64) ++ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; +- if (rfixed_trunc(wm1.dbpp) > 64) +- b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); ++ if (dfixed_trunc(wm1.dbpp) > 64) ++ b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); + else + b.full = wm1.num_line_pair.full; + a.full += b.full; +- fill_rate.full = rfixed_div(wm0.sclk, a); ++ fill_rate.full = dfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; +- b.full = rfixed_mul(b, wm0.active_time); +- a.full = rfixed_const(16); +- b.full = rfixed_div(b, a); +- a.full = rfixed_mul(wm0.worst_case_latency, ++ b.full = dfixed_mul(b, wm0.active_time); ++ a.full = dfixed_const(16); ++ b.full = dfixed_div(b, a); ++ a.full = dfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + priority_mark02.full = a.full + b.full; + } else { +- a.full = rfixed_mul(wm0.worst_case_latency, ++ a.full = dfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); +- b.full = rfixed_const(16 * 1000); +- priority_mark02.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark02.full = dfixed_div(a, b); + } + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; +- b.full = rfixed_mul(b, wm1.active_time); +- a.full = rfixed_const(16); +- b.full = rfixed_div(b, a); +- a.full = rfixed_mul(wm1.worst_case_latency, ++ b.full = dfixed_mul(b, wm1.active_time); ++ a.full = dfixed_const(16); ++ b.full = dfixed_div(b, a); ++ a.full = dfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + priority_mark12.full = a.full + b.full; + } else { +- a.full = rfixed_mul(wm1.worst_case_latency, ++ a.full = dfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); +- 
b.full = rfixed_const(16 * 1000); +- priority_mark12.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark12.full = dfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; +- if (rfixed_trunc(priority_mark02) < 0) ++ if (dfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; +- if (rfixed_trunc(priority_mark12) < 0) ++ if (dfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; +- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); +- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); ++ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); ++ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) { + d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; +@@ -1096,32 +1003,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) + WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); + } else if (mode0) { +- if (rfixed_trunc(wm0.dbpp) > 64) +- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); ++ if (dfixed_trunc(wm0.dbpp) > 64) ++ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; +- fill_rate.full = rfixed_div(wm0.sclk, a); ++ fill_rate.full = dfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; +- b.full = rfixed_mul(b, wm0.active_time); +- a.full = rfixed_const(16); +- b.full = rfixed_div(b, a); +- a.full = rfixed_mul(wm0.worst_case_latency, ++ b.full = dfixed_mul(b, 
wm0.active_time); ++ a.full = dfixed_const(16); ++ b.full = dfixed_div(b, a); ++ a.full = dfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + priority_mark02.full = a.full + b.full; + } else { +- a.full = rfixed_mul(wm0.worst_case_latency, ++ a.full = dfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); +- b.full = rfixed_const(16); +- priority_mark02.full = rfixed_div(a, b); ++ b.full = dfixed_const(16); ++ priority_mark02.full = dfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; +- if (rfixed_trunc(priority_mark02) < 0) ++ if (dfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; +- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); ++ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); + if (rdev->disp_priority == 2) + d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); +@@ -1129,32 +1036,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) + WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + } else { +- if (rfixed_trunc(wm1.dbpp) > 64) +- a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); ++ if (dfixed_trunc(wm1.dbpp) > 64) ++ a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); + else + a.full = wm1.num_line_pair.full; +- fill_rate.full = rfixed_div(wm1.sclk, a); ++ fill_rate.full = dfixed_div(wm1.sclk, a); + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; +- b.full = rfixed_mul(b, wm1.active_time); +- a.full = rfixed_const(16); +- b.full = rfixed_div(b, a); +- a.full = rfixed_mul(wm1.worst_case_latency, ++ b.full = dfixed_mul(b, wm1.active_time); ++ a.full = dfixed_const(16); ++ b.full = dfixed_div(b, a); ++ a.full = dfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); 
+ priority_mark12.full = a.full + b.full; + } else { +- a.full = rfixed_mul(wm1.worst_case_latency, ++ a.full = dfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); +- b.full = rfixed_const(16 * 1000); +- priority_mark12.full = rfixed_div(a, b); ++ b.full = dfixed_const(16 * 1000); ++ priority_mark12.full = dfixed_div(a, b); + } + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; +- if (rfixed_trunc(priority_mark12) < 0) ++ if (dfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; +- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); ++ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); + if (rdev->disp_priority == 2) + d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; + WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); +diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h +index fc216e4..590309a 100644 +--- a/drivers/gpu/drm/radeon/rv515d.h ++++ b/drivers/gpu/drm/radeon/rv515d.h +@@ -217,6 +217,52 @@ + #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) + + /* Registers */ ++#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 ++#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) ++#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) ++#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE ++#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) ++#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) ++#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD ++#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) ++#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) ++#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB ++#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) ++#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) ++#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 ++#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) ++#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) ++#define 
C_0000F0_SOFT_RESET_PP 0xFFFFFFEF ++#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) ++#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) ++#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF ++#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) ++#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) ++#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF ++#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) ++#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) ++#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F ++#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) ++#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) ++#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF ++#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) ++#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) ++#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF ++#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) ++#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) ++#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF ++#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) ++#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) ++#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF ++#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) ++#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) ++#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF ++#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) ++#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) ++#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF ++#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) ++#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) ++#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF + #define R_0000F8_CONFIG_MEMSIZE 0x0000F8 + #define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) + #define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) +diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c +index 97958a6..cec536c 100644 +--- a/drivers/gpu/drm/radeon/rv770.c ++++ b/drivers/gpu/drm/radeon/rv770.c +@@ -42,6 +42,21 @@ + static void 
rv770_gpu_init(struct radeon_device *rdev); + void rv770_fini(struct radeon_device *rdev); + ++void rv770_pm_misc(struct radeon_device *rdev) ++{ ++ int req_ps_idx = rdev->pm.requested_power_state_index; ++ int req_cm_idx = rdev->pm.requested_clock_mode_index; ++ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; ++ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; ++ ++ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { ++ if (voltage->voltage != rdev->pm.current_vddc) { ++ radeon_atom_set_voltage(rdev, voltage->voltage); ++ rdev->pm.current_vddc = voltage->voltage; ++ DRM_DEBUG("Setting: v: %d\n", voltage->voltage); ++ } ++ } ++} + + /* + * GART +@@ -237,7 +252,6 @@ void r700_cp_stop(struct radeon_device *rdev) + WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); + } + +- + static int rv770_cp_load_microcode(struct radeon_device *rdev) + { + const __be32 *fw_data; +@@ -272,6 +286,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) + return 0; + } + ++void r700_cp_fini(struct radeon_device *rdev) ++{ ++ r700_cp_stop(rdev); ++ radeon_ring_fini(rdev); ++} + + /* + * Core functions +@@ -906,23 +925,12 @@ int rv770_mc_init(struct radeon_device *rdev) + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); + rdev->mc.visible_vram_size = rdev->mc.aper_size; +- /* FIXME remove this once we support unmappable VRAM */ +- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { +- rdev->mc.mc_vram_size = rdev->mc.aper_size; +- rdev->mc.real_vram_size = rdev->mc.aper_size; +- } + r600_vram_gtt_location(rdev, &rdev->mc); + radeon_update_bandwidth_info(rdev); + + return 0; + } + +-int rv770_gpu_reset(struct radeon_device *rdev) +-{ +- /* FIXME: implement any rv770 specific bits */ +- return r600_gpu_reset(rdev); +-} +- + static int rv770_startup(struct radeon_device *rdev) + { + int r; +@@ -1094,8 +1102,6 @@ int rv770_init(struct radeon_device *rdev) + r = radeon_clocks_init(rdev); + 
if (r) + return r; +- /* Initialize power management */ +- radeon_pm_init(rdev); + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) +@@ -1132,7 +1138,7 @@ int rv770_init(struct radeon_device *rdev) + r = rv770_startup(rdev); + if (r) { + dev_err(rdev->dev, "disabling GPU acceleration\n"); +- r600_cp_fini(rdev); ++ r700_cp_fini(rdev); + r600_wb_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); +@@ -1164,9 +1170,8 @@ int rv770_init(struct radeon_device *rdev) + + void rv770_fini(struct radeon_device *rdev) + { +- radeon_pm_fini(rdev); + r600_blit_fini(rdev); +- r600_cp_fini(rdev); ++ r700_cp_fini(rdev); + r600_wb_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); +diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c +index bff6fc2..2d0c9ca 100644 +--- a/drivers/gpu/drm/savage/savage_bci.c ++++ b/drivers/gpu/drm/savage/savage_bci.c +@@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) + { + drm_savage_private_t *dev_priv; + +- dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL); ++ dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL); + if (dev_priv == NULL) + return -ENOMEM; + +- memset(dev_priv, 0, sizeof(drm_savage_private_t)); + dev->dev_private = (void *)dev_priv; + + dev_priv->chipset = (enum savage_family)chipset; +diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile +index 1e138f5..4256e20 100644 +--- a/drivers/gpu/drm/ttm/Makefile ++++ b/drivers/gpu/drm/ttm/Makefile +@@ -4,6 +4,6 @@ + ccflags-y := -Iinclude/drm + ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ + ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ +- ttm_object.o ttm_lock.o ttm_execbuf_util.o ++ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o + + obj-$(CONFIG_DRM_TTM) += ttm.o +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c +index 0e3754a..555ebb1 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo.c 
++++ b/drivers/gpu/drm/ttm/ttm_bo.c +@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) + printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); + printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); + printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); +- printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset); +- printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size); + printk(KERN_ERR TTM_PFX " size: %llu\n", man->size); + printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", + man->available_caching); +@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) + + static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, +- bool evict, bool interruptible, bool no_wait) ++ bool evict, bool interruptible, ++ bool no_wait_reserve, bool no_wait_gpu) + { + struct ttm_bo_device *bdev = bo->bdev; + bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); +@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, + + if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && + !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) +- ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); ++ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); + else if (bdev->driver->move) + ret = bdev->driver->move(bo, evict, interruptible, +- no_wait, mem); ++ no_wait_reserve, no_wait_gpu, mem); + else +- ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem); ++ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); + + if (ret) + goto out_err; +@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo) + } + EXPORT_SYMBOL(ttm_bo_unref); + ++int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) ++{ ++ return cancel_delayed_work_sync(&bdev->wq); ++} ++EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); ++ ++void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) ++{ ++ if 
(resched) ++ schedule_delayed_work(&bdev->wq, ++ ((HZ / 100) < 1) ? 1 : HZ / 100); ++} ++EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); ++ + static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, +- bool no_wait) ++ bool no_wait_reserve, bool no_wait_gpu) + { + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bo->glob; +@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, + int ret = 0; + + spin_lock(&bo->lock); +- ret = ttm_bo_wait(bo, false, interruptible, no_wait); ++ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); + spin_unlock(&bo->lock); + + if (unlikely(ret != 0)) { +@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, + + evict_mem = bo->mem; + evict_mem.mm_node = NULL; ++ evict_mem.bus.io_reserved = false; + + placement.fpfn = 0; + placement.lpfn = 0; +@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, + placement.num_busy_placement = 0; + bdev->driver->evict_flags(bo, &placement); + ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, +- no_wait); ++ no_wait_reserve, no_wait_gpu); + if (ret) { + if (ret != -ERESTARTSYS) { + printk(KERN_ERR TTM_PFX +@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, + } + + ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, +- no_wait); ++ no_wait_reserve, no_wait_gpu); + if (ret) { + if (ret != -ERESTARTSYS) + printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); +@@ -670,7 +684,8 @@ out: + + static int ttm_mem_evict_first(struct ttm_bo_device *bdev, + uint32_t mem_type, +- bool interruptible, bool no_wait) ++ bool interruptible, bool no_wait_reserve, ++ bool no_wait_gpu) + { + struct ttm_bo_global *glob = bdev->glob; + struct ttm_mem_type_manager *man = &bdev->man[mem_type]; +@@ -687,11 +702,11 @@ retry: + bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); + 
kref_get(&bo->list_kref); + +- ret = ttm_bo_reserve_locked(bo, false, true, false, 0); ++ ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); + + if (unlikely(ret == -EBUSY)) { + spin_unlock(&glob->lru_lock); +- if (likely(!no_wait)) ++ if (likely(!no_wait_gpu)) + ret = ttm_bo_wait_unreserved(bo, interruptible); + + kref_put(&bo->list_kref, ttm_bo_release_list); +@@ -713,7 +728,7 @@ retry: + while (put_count--) + kref_put(&bo->list_kref, ttm_bo_ref_bug); + +- ret = ttm_bo_evict(bo, interruptible, no_wait); ++ ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); + ttm_bo_unreserve(bo); + + kref_put(&bo->list_kref, ttm_bo_release_list); +@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, + uint32_t mem_type, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, +- bool interruptible, bool no_wait) ++ bool interruptible, ++ bool no_wait_reserve, ++ bool no_wait_gpu) + { + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_global *glob = bdev->glob; +@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, + } + spin_unlock(&glob->lru_lock); + ret = ttm_mem_evict_first(bdev, mem_type, interruptible, +- no_wait); ++ no_wait_reserve, no_wait_gpu); + if (unlikely(ret != 0)) + return ret; + } while (1); +@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, + int ttm_bo_mem_space(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, +- bool interruptible, bool no_wait) ++ bool interruptible, bool no_wait_reserve, ++ bool no_wait_gpu) + { + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man; +@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, + } + + ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, +- interruptible, no_wait); ++ interruptible, no_wait_reserve, no_wait_gpu); + if (ret == 0 && mem->mm_node) { + mem->placement = cur_flags; + 
mem->mm_node->private = bo; +@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu); + + int ttm_bo_move_buffer(struct ttm_buffer_object *bo, + struct ttm_placement *placement, +- bool interruptible, bool no_wait) ++ bool interruptible, bool no_wait_reserve, ++ bool no_wait_gpu) + { + struct ttm_bo_global *glob = bo->glob; + int ret = 0; +@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, + * instead of doing it here. + */ + spin_lock(&bo->lock); +- ret = ttm_bo_wait(bo, false, interruptible, no_wait); ++ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); + spin_unlock(&bo->lock); + if (ret) + return ret; + mem.num_pages = bo->num_pages; + mem.size = mem.num_pages << PAGE_SHIFT; + mem.page_alignment = bo->mem.page_alignment; ++ mem.bus.io_reserved = false; + /* + * Determine where to move the buffer. + */ +- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait); ++ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); + if (ret) + goto out_unlock; +- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); ++ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); + out_unlock: + if (ret && mem.mm_node) { + spin_lock(&glob->lru_lock); +@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, + + int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, +- bool interruptible, bool no_wait) ++ bool interruptible, bool no_wait_reserve, ++ bool no_wait_gpu) + { + int ret; + +@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, + */ + ret = ttm_bo_mem_compat(placement, &bo->mem); + if (ret < 0) { +- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait); ++ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); + if (ret) + return ret; + } else { +@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, + bo->mem.num_pages = 
bo->num_pages; + bo->mem.mm_node = NULL; + bo->mem.page_alignment = page_alignment; ++ bo->mem.bus.io_reserved = false; + bo->buffer_start = buffer_start & PAGE_MASK; + bo->priv_flags = 0; + bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); +@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, + goto out_err; + } + +- ret = ttm_bo_validate(bo, placement, interruptible, false); ++ ret = ttm_bo_validate(bo, placement, interruptible, false, false); + if (ret) + goto out_err; + +@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, + spin_lock(&glob->lru_lock); + while (!list_empty(&man->lru)) { + spin_unlock(&glob->lru_lock); +- ret = ttm_mem_evict_first(bdev, mem_type, false, false); ++ ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); + if (ret) { + if (allow_errors) { + return ret; +@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) + return true; + } + +-int ttm_bo_pci_offset(struct ttm_bo_device *bdev, +- struct ttm_mem_reg *mem, +- unsigned long *bus_base, +- unsigned long *bus_offset, unsigned long *bus_size) +-{ +- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; +- +- *bus_size = 0; +- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) +- return -EINVAL; +- +- if (ttm_mem_reg_is_pci(bdev, mem)) { +- *bus_offset = mem->mm_node->start << PAGE_SHIFT; +- *bus_size = mem->num_pages << PAGE_SHIFT; +- *bus_base = man->io_offset; +- } +- +- return 0; +-} +- + void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) + { + struct ttm_bo_device *bdev = bo->bdev; +@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) + + if (!bdev->dev_mapping) + return; +- + unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); ++ ttm_mem_io_free(bdev, &bo->mem); + } + EXPORT_SYMBOL(ttm_bo_unmap_virtual); + +@@ -1811,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) + evict_mem.mem_type = TTM_PL_SYSTEM; + + ret 
= ttm_bo_handle_move_mem(bo, &evict_mem, true, +- false, false); ++ false, false, false); + if (unlikely(ret != 0)) + goto out; + } +diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c +index d764e82..13012a1 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo_util.c ++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c +@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) + } + + int ttm_bo_move_ttm(struct ttm_buffer_object *bo, +- bool evict, bool no_wait, struct ttm_mem_reg *new_mem) ++ bool evict, bool no_wait_reserve, ++ bool no_wait_gpu, struct ttm_mem_reg *new_mem) + { + struct ttm_tt *ttm = bo->ttm; + struct ttm_mem_reg *old_mem = &bo->mem; +@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + } + EXPORT_SYMBOL(ttm_bo_move_ttm); + ++int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ++{ ++ int ret; ++ ++ if (!mem->bus.io_reserved) { ++ mem->bus.io_reserved = true; ++ ret = bdev->driver->io_mem_reserve(bdev, mem); ++ if (unlikely(ret != 0)) ++ return ret; ++ } ++ return 0; ++} ++ ++void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ++{ ++ if (bdev->driver->io_mem_reserve) { ++ if (mem->bus.io_reserved) { ++ mem->bus.io_reserved = false; ++ bdev->driver->io_mem_free(bdev, mem); ++ } ++ } ++} ++ + int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, + void **virtual) + { +- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; +- unsigned long bus_offset; +- unsigned long bus_size; +- unsigned long bus_base; + int ret; + void *addr; + + *virtual = NULL; +- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size); +- if (ret || bus_size == 0) ++ ret = ttm_mem_io_reserve(bdev, mem); ++ if (ret || !mem->bus.is_iomem) + return ret; + +- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) +- addr = (void *)(((u8 *) man->io_addr) + bus_offset); +- else { ++ if (mem->bus.addr) { ++ addr = mem->bus.addr; ++ } else { + if 
(mem->placement & TTM_PL_FLAG_WC) +- addr = ioremap_wc(bus_base + bus_offset, bus_size); ++ addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size); + else +- addr = ioremap_nocache(bus_base + bus_offset, bus_size); +- if (!addr) ++ addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); ++ if (!addr) { ++ ttm_mem_io_free(bdev, mem); + return -ENOMEM; ++ } + } + *virtual = addr; + return 0; +@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, + + man = &bdev->man[mem->mem_type]; + +- if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) ++ if (virtual && mem->bus.addr == NULL) + iounmap(virtual); ++ ttm_mem_io_free(bdev, mem); + } + + static int ttm_copy_io_page(void *dst, void *src, unsigned long page) +@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, + } + + int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, +- bool evict, bool no_wait, struct ttm_mem_reg *new_mem) ++ bool evict, bool no_wait_reserve, bool no_wait_gpu, ++ struct ttm_mem_reg *new_mem) + { + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; +@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) + EXPORT_SYMBOL(ttm_io_prot); + + static int ttm_bo_ioremap(struct ttm_buffer_object *bo, +- unsigned long bus_base, +- unsigned long bus_offset, +- unsigned long bus_size, ++ unsigned long offset, ++ unsigned long size, + struct ttm_bo_kmap_obj *map) + { +- struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_reg *mem = &bo->mem; +- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + +- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) { ++ if (bo->mem.bus.addr) { + map->bo_kmap_type = ttm_bo_map_premapped; +- map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); ++ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); + } else { + map->bo_kmap_type = ttm_bo_map_iomap; + if 
(mem->placement & TTM_PL_FLAG_WC) +- map->virtual = ioremap_wc(bus_base + bus_offset, +- bus_size); ++ map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset, ++ size); + else +- map->virtual = ioremap_nocache(bus_base + bus_offset, +- bus_size); ++ map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset, ++ size); + } + return (!map->virtual) ? -ENOMEM : 0; + } +@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, + unsigned long start_page, unsigned long num_pages, + struct ttm_bo_kmap_obj *map) + { ++ unsigned long offset, size; + int ret; +- unsigned long bus_base; +- unsigned long bus_offset; +- unsigned long bus_size; + + BUG_ON(!list_empty(&bo->swap)); + map->virtual = NULL; ++ map->bo = bo; + if (num_pages > bo->num_pages) + return -EINVAL; + if (start_page > bo->num_pages) +@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, + if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) + return -EPERM; + #endif +- ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base, +- &bus_offset, &bus_size); ++ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); + if (ret) + return ret; +- if (bus_size == 0) { ++ if (!bo->mem.bus.is_iomem) { + return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); + } else { +- bus_offset += start_page << PAGE_SHIFT; +- bus_size = num_pages << PAGE_SHIFT; +- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); ++ offset = start_page << PAGE_SHIFT; ++ size = num_pages << PAGE_SHIFT; ++ return ttm_bo_ioremap(bo, offset, size, map); + } + } + EXPORT_SYMBOL(ttm_bo_kmap); +@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) + switch (map->bo_kmap_type) { + case ttm_bo_map_iomap: + iounmap(map->virtual); ++ ttm_mem_io_free(map->bo->bdev, &map->bo->mem); + break; + case ttm_bo_map_vmap: + vunmap(map->virtual); +@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) + } + EXPORT_SYMBOL(ttm_bo_kunmap); + +-int ttm_bo_pfn_prot(struct ttm_buffer_object 
*bo, +- unsigned long dst_offset, +- unsigned long *pfn, pgprot_t *prot) +-{ +- struct ttm_mem_reg *mem = &bo->mem; +- struct ttm_bo_device *bdev = bo->bdev; +- unsigned long bus_offset; +- unsigned long bus_size; +- unsigned long bus_base; +- int ret; +- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, +- &bus_size); +- if (ret) +- return -EINVAL; +- if (bus_size != 0) +- *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT; +- else +- if (!bo->ttm) +- return -EINVAL; +- else +- *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm, +- dst_offset >> +- PAGE_SHIFT)); +- *prot = (mem->placement & TTM_PL_FLAG_CACHED) ? +- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL); +- +- return 0; +-} +- + int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + void *sync_obj, + void *sync_obj_arg, +- bool evict, bool no_wait, ++ bool evict, bool no_wait_reserve, ++ bool no_wait_gpu, + struct ttm_mem_reg *new_mem) + { + struct ttm_bo_device *bdev = bo->bdev; +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c +index 668dbe8..fe6cb77 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c +@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; + struct ttm_bo_device *bdev = bo->bdev; +- unsigned long bus_base; +- unsigned long bus_offset; +- unsigned long bus_size; + unsigned long page_offset; + unsigned long page_last; + unsigned long pfn; +@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + struct page *page; + int ret; + int i; +- bool is_iomem; + unsigned long address = (unsigned long)vmf->virtual_address; + int retval = VM_FAULT_NOPAGE; + +@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + return VM_FAULT_NOPAGE; + } + +- if (bdev->driver->fault_reserve_notify) +- 
bdev->driver->fault_reserve_notify(bo); ++ if (bdev->driver->fault_reserve_notify) { ++ ret = bdev->driver->fault_reserve_notify(bo); ++ switch (ret) { ++ case 0: ++ break; ++ case -EBUSY: ++ set_need_resched(); ++ case -ERESTARTSYS: ++ retval = VM_FAULT_NOPAGE; ++ goto out_unlock; ++ default: ++ retval = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ } + + /* + * Wait for buffer data in transit, due to a pipelined +@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + spin_unlock(&bo->lock); + + +- ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, +- &bus_size); +- if (unlikely(ret != 0)) { ++ ret = ttm_mem_io_reserve(bdev, &bo->mem); ++ if (ret) { + retval = VM_FAULT_SIGBUS; + goto out_unlock; + } + +- is_iomem = (bus_size != 0); +- + page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + + bo->vm_node->start - vma->vm_pgoff; + page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + +@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + * vma->vm_page_prot when the object changes caching policy, with + * the correct locks held. + */ +- +- if (is_iomem) { ++ if (bo->mem.bus.is_iomem) { + vma->vm_page_prot = ttm_io_prot(bo->mem.placement, + vma->vm_page_prot); + } else { +@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + */ + + for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { +- +- if (is_iomem) +- pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + +- page_offset; ++ if (bo->mem.bus.is_iomem) ++ pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; + else { + page = ttm_tt_get_page(ttm, page_offset); + if (unlikely(!page && i == 0)) { +@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + retval = + (ret == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS; + goto out_unlock; +- + } + + address += PAGE_SIZE; +@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma) + + static void ttm_bo_vm_close(struct vm_area_struct *vma) + { +- struct ttm_buffer_object *bo = +- (struct ttm_buffer_object *)vma->vm_private_data; ++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; + + ttm_bo_unref(&bo); + vma->vm_private_data = NULL; +diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c +index 801b702..e70ddd8 100644 +--- a/drivers/gpu/drm/ttm/ttm_memory.c ++++ b/drivers/gpu/drm/ttm/ttm_memory.c +@@ -27,6 +27,7 @@ + + #include "ttm/ttm_memory.h" + #include "ttm/ttm_module.h" ++#include "ttm/ttm_page_alloc.h" + #include + #include + #include +@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) + "Zone %7s: Available graphics memory: %llu kiB.\n", + zone->name, (unsigned long long) zone->max_mem >> 10); + } ++ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); + return 0; + out_no_zone: + ttm_mem_global_release(glob); +@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) + unsigned int i; + struct ttm_mem_zone *zone; + ++ /* let the page allocator first stop the shrink work. */ ++ ttm_page_alloc_fini(); ++ + flush_workqueue(glob->swap_queue); + destroy_workqueue(glob->swap_queue); + glob->swap_queue = NULL; +@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) + zone = glob->zones[i]; + kobject_del(&zone->kobj); + kobject_put(&zone->kobj); +- } ++ } + kobject_del(&glob->kobj); + kobject_put(&glob->kobj); + } +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +new file mode 100644 +index 0000000..ef91069 +--- /dev/null ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -0,0 +1,855 @@ ++/* ++ * Copyright (c) Red Hat Inc. 
++ ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Dave Airlie ++ * Jerome Glisse ++ * Pauli Nieminen ++ */ ++ ++/* simple list based uncached page pool ++ * - Pool collects recently freed pages for reuse ++ * - Use page->lru to keep a free list ++ * - doesn't track currently in use pages ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* for seq_printf */ ++#include ++ ++#include ++#include ++ ++#include "ttm/ttm_bo_driver.h" ++#include "ttm/ttm_page_alloc.h" ++ ++ ++#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) ++#define SMALL_ALLOCATION 16 ++#define FREE_ALL_PAGES (~0U) ++/* times are in msecs */ ++#define PAGE_FREE_INTERVAL 1000 ++ ++/** ++ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. ++ * ++ * @lock: Protects the shared pool from concurrent access.
Must be used with ++ * irqsave/irqrestore variants because pool allocator may be called from ++ * delayed work. ++ * @fill_lock: Prevent concurrent calls to fill. ++ * @list: Pool of free uc/wc pages for fast reuse. ++ * @gfp_flags: Flags to pass for alloc_page. ++ * @npages: Number of pages in pool. ++ */ ++struct ttm_page_pool { ++ spinlock_t lock; ++ bool fill_lock; ++ struct list_head list; ++ int gfp_flags; ++ unsigned npages; ++ char *name; ++ unsigned long nfrees; ++ unsigned long nrefills; ++}; ++ ++/** ++ * Limits for the pool. They are handled without locks because only place where ++ * they may change is in sysfs store. They won't have immediate effect anyway ++ * so forcing serialization to access them is pointless. ++ */ ++ ++struct ttm_pool_opts { ++ unsigned alloc_size; ++ unsigned max_size; ++ unsigned small; ++}; ++ ++#define NUM_POOLS 4 ++ ++/** ++ * struct ttm_pool_manager - Holds memory pools for fast allocation ++ * ++ * Manager is read only object for pool code so it doesn't need locking. ++ * ++ * @free_interval: minimum number of jiffies between freeing pages from pool. ++ * @page_alloc_inited: reference counting for pool allocation. ++ * @work: Work that is used to shrink the pool. Work is only run when there is ++ * some pages to free. ++ * @small_allocation: Limit in number of pages what is small allocation. ++ * ++ * @pools: All pool objects in use.
++ **/ ++struct ttm_pool_manager { ++ struct kobject kobj; ++ struct shrinker mm_shrink; ++ atomic_t page_alloc_inited; ++ struct ttm_pool_opts options; ++ ++ union { ++ struct ttm_page_pool pools[NUM_POOLS]; ++ struct { ++ struct ttm_page_pool wc_pool; ++ struct ttm_page_pool uc_pool; ++ struct ttm_page_pool wc_pool_dma32; ++ struct ttm_page_pool uc_pool_dma32; ++ } ; ++ }; ++}; ++ ++static struct attribute ttm_page_pool_max = { ++ .name = "pool_max_size", ++ .mode = S_IRUGO | S_IWUSR ++}; ++static struct attribute ttm_page_pool_small = { ++ .name = "pool_small_allocation", ++ .mode = S_IRUGO | S_IWUSR ++}; ++static struct attribute ttm_page_pool_alloc_size = { ++ .name = "pool_allocation_size", ++ .mode = S_IRUGO | S_IWUSR ++}; ++ ++static struct attribute *ttm_pool_attrs[] = { ++ &ttm_page_pool_max, ++ &ttm_page_pool_small, ++ &ttm_page_pool_alloc_size, ++ NULL ++}; ++ ++static void ttm_pool_kobj_release(struct kobject *kobj) ++{ ++ struct ttm_pool_manager *m = ++ container_of(kobj, struct ttm_pool_manager, kobj); ++ (void)m; ++} ++ ++static ssize_t ttm_pool_store(struct kobject *kobj, ++ struct attribute *attr, const char *buffer, size_t size) ++{ ++ struct ttm_pool_manager *m = ++ container_of(kobj, struct ttm_pool_manager, kobj); ++ int chars; ++ unsigned val; ++ chars = sscanf(buffer, "%u", &val); ++ if (chars == 0) ++ return size; ++ ++ /* Convert kb to number of pages */ ++ val = val / (PAGE_SIZE >> 10); ++ ++ if (attr == &ttm_page_pool_max) ++ m->options.max_size = val; ++ else if (attr == &ttm_page_pool_small) ++ m->options.small = val; ++ else if (attr == &ttm_page_pool_alloc_size) { ++ if (val > NUM_PAGES_TO_ALLOC*8) { ++ printk(KERN_ERR TTM_PFX ++ "Setting allocation size to %lu " ++ "is not allowed. 
Recommended size is " ++ "%lu\n", ++ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), ++ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); ++ return size; ++ } else if (val > NUM_PAGES_TO_ALLOC) { ++ printk(KERN_WARNING TTM_PFX ++ "Setting allocation size to " ++ "larger than %lu is not recommended.\n", ++ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); ++ } ++ m->options.alloc_size = val; ++ } ++ ++ return size; ++} ++ ++static ssize_t ttm_pool_show(struct kobject *kobj, ++ struct attribute *attr, char *buffer) ++{ ++ struct ttm_pool_manager *m = ++ container_of(kobj, struct ttm_pool_manager, kobj); ++ unsigned val = 0; ++ ++ if (attr == &ttm_page_pool_max) ++ val = m->options.max_size; ++ else if (attr == &ttm_page_pool_small) ++ val = m->options.small; ++ else if (attr == &ttm_page_pool_alloc_size) ++ val = m->options.alloc_size; ++ ++ val = val * (PAGE_SIZE >> 10); ++ ++ return snprintf(buffer, PAGE_SIZE, "%u\n", val); ++} ++ ++static const struct sysfs_ops ttm_pool_sysfs_ops = { ++ .show = &ttm_pool_show, ++ .store = &ttm_pool_store, ++}; ++ ++static struct kobj_type ttm_pool_kobj_type = { ++ .release = &ttm_pool_kobj_release, ++ .sysfs_ops = &ttm_pool_sysfs_ops, ++ .default_attrs = ttm_pool_attrs, ++}; ++ ++static struct ttm_pool_manager _manager = { ++ .page_alloc_inited = ATOMIC_INIT(0) ++}; ++ ++#ifndef CONFIG_X86 ++static int set_pages_array_wb(struct page **pages, int addrinarray) ++{ ++#ifdef TTM_HAS_AGP ++ int i; ++ ++ for (i = 0; i < addrinarray; i++) ++ unmap_page_from_agp(pages[i]); ++#endif ++ return 0; ++} ++ ++static int set_pages_array_wc(struct page **pages, int addrinarray) ++{ ++#ifdef TTM_HAS_AGP ++ int i; ++ ++ for (i = 0; i < addrinarray; i++) ++ map_page_into_agp(pages[i]); ++#endif ++ return 0; ++} ++ ++static int set_pages_array_uc(struct page **pages, int addrinarray) ++{ ++#ifdef TTM_HAS_AGP ++ int i; ++ ++ for (i = 0; i < addrinarray; i++) ++ map_page_into_agp(pages[i]); ++#endif ++ return 0; ++} ++#endif ++ ++/** ++ * Select the right pool or requested caching 
state and ttm flags. */ ++static struct ttm_page_pool *ttm_get_pool(int flags, ++ enum ttm_caching_state cstate) ++{ ++ int pool_index; ++ ++ if (cstate == tt_cached) ++ return NULL; ++ ++ if (cstate == tt_wc) ++ pool_index = 0x0; ++ else ++ pool_index = 0x1; ++ ++ if (flags & TTM_PAGE_FLAG_DMA32) ++ pool_index |= 0x2; ++ ++ return &_manager.pools[pool_index]; ++} ++ ++/* set memory back to wb and free the pages. */ ++static void ttm_pages_put(struct page *pages[], unsigned npages) ++{ ++ unsigned i; ++ if (set_pages_array_wb(pages, npages)) ++ printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", ++ npages); ++ for (i = 0; i < npages; ++i) ++ __free_page(pages[i]); ++} ++ ++static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, ++ unsigned freed_pages) ++{ ++ pool->npages -= freed_pages; ++ pool->nfrees += freed_pages; ++} ++ ++/** ++ * Free pages from pool. ++ * ++ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC ++ * number of pages in one go. ++ * ++ * @pool: to free the pages from ++ * @free_all: If set to true will free all pages in pool ++ **/ ++static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) ++{ ++ unsigned long irq_flags; ++ struct page *p; ++ struct page **pages_to_free; ++ unsigned freed_pages = 0, ++ npages_to_free = nr_free; ++ ++ if (NUM_PAGES_TO_ALLOC < nr_free) ++ npages_to_free = NUM_PAGES_TO_ALLOC; ++ ++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), ++ GFP_KERNEL); ++ if (!pages_to_free) { ++ printk(KERN_ERR TTM_PFX ++ "Failed to allocate memory for pool free operation.\n"); ++ return 0; ++ } ++ ++restart: ++ spin_lock_irqsave(&pool->lock, irq_flags); ++ ++ list_for_each_entry_reverse(p, &pool->list, lru) { ++ if (freed_pages >= npages_to_free) ++ break; ++ ++ pages_to_free[freed_pages++] = p; ++ /* We can only remove NUM_PAGES_TO_ALLOC at a time. 
*/ ++ if (freed_pages >= NUM_PAGES_TO_ALLOC) { ++ /* remove range of pages from the pool */ ++ __list_del(p->lru.prev, &pool->list); ++ ++ ttm_pool_update_free_locked(pool, freed_pages); ++ /** ++ * Because changing page caching is costly ++ * we unlock the pool to prevent stalling. ++ */ ++ spin_unlock_irqrestore(&pool->lock, irq_flags); ++ ++ ttm_pages_put(pages_to_free, freed_pages); ++ if (likely(nr_free != FREE_ALL_PAGES)) ++ nr_free -= freed_pages; ++ ++ if (NUM_PAGES_TO_ALLOC >= nr_free) ++ npages_to_free = nr_free; ++ else ++ npages_to_free = NUM_PAGES_TO_ALLOC; ++ ++ freed_pages = 0; ++ ++ /* free all so restart the processing */ ++ if (nr_free) ++ goto restart; ++ ++ /* Not allowed to fall through or break because ++ * following context is inside spinlock while we are ++ * outside here. ++ */ ++ goto out; ++ ++ } ++ } ++ ++ /* remove range of pages from the pool */ ++ if (freed_pages) { ++ __list_del(&p->lru, &pool->list); ++ ++ ttm_pool_update_free_locked(pool, freed_pages); ++ nr_free -= freed_pages; ++ } ++ ++ spin_unlock_irqrestore(&pool->lock, irq_flags); ++ ++ if (freed_pages) ++ ttm_pages_put(pages_to_free, freed_pages); ++out: ++ kfree(pages_to_free); ++ return nr_free; ++} ++ ++/* Get good estimation how many pages are free in pools */ ++static int ttm_pool_get_num_unused_pages(void) ++{ ++ unsigned i; ++ int total = 0; ++ for (i = 0; i < NUM_POOLS; ++i) ++ total += _manager.pools[i].npages; ++ ++ return total; ++} ++ ++/** ++ * Callback for mm to request pool to reduce number of page held.
++ */ ++static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) ++{ ++ static atomic_t start_pool = ATOMIC_INIT(0); ++ unsigned i; ++ unsigned pool_offset = atomic_add_return(1, &start_pool); ++ struct ttm_page_pool *pool; ++ ++ pool_offset = pool_offset % NUM_POOLS; ++ /* select start pool in round robin fashion */ ++ for (i = 0; i < NUM_POOLS; ++i) { ++ unsigned nr_free = shrink_pages; ++ if (shrink_pages == 0) ++ break; ++ pool = &_manager.pools[(i + pool_offset)%NUM_POOLS]; ++ shrink_pages = ttm_page_pool_free(pool, nr_free); ++ } ++ /* return estimated number of unused pages in pool */ ++ return ttm_pool_get_num_unused_pages(); ++} ++ ++static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) ++{ ++ manager->mm_shrink.shrink = &ttm_pool_mm_shrink; ++ manager->mm_shrink.seeks = 1; ++ register_shrinker(&manager->mm_shrink); ++} ++ ++static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) ++{ ++ unregister_shrinker(&manager->mm_shrink); ++} ++ ++static int ttm_set_pages_caching(struct page **pages, ++ enum ttm_caching_state cstate, unsigned cpages) ++{ ++ int r = 0; ++ /* Set page caching */ ++ switch (cstate) { ++ case tt_uncached: ++ r = set_pages_array_uc(pages, cpages); ++ if (r) ++ printk(KERN_ERR TTM_PFX ++ "Failed to set %d pages to uc!\n", ++ cpages); ++ break; ++ case tt_wc: ++ r = set_pages_array_wc(pages, cpages); ++ if (r) ++ printk(KERN_ERR TTM_PFX ++ "Failed to set %d pages to wc!\n", ++ cpages); ++ break; ++ default: ++ break; ++ } ++ return r; ++} ++ ++/** ++ * Free pages the pages that failed to change the caching state. If there is ++ * any pages that have changed their caching state already put them to the ++ * pool. 
++ */ ++static void ttm_handle_caching_state_failure(struct list_head *pages, ++ int ttm_flags, enum ttm_caching_state cstate, ++ struct page **failed_pages, unsigned cpages) ++{ ++ unsigned i; ++ /* Failed pages have to be freed */ ++ for (i = 0; i < cpages; ++i) { ++ list_del(&failed_pages[i]->lru); ++ __free_page(failed_pages[i]); ++ } ++} ++ ++/** ++ * Allocate new pages with correct caching. ++ * ++ * This function is reentrant if caller updates count depending on number of ++ * pages returned in pages array. ++ */ ++static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, ++ int ttm_flags, enum ttm_caching_state cstate, unsigned count) ++{ ++ struct page **caching_array; ++ struct page *p; ++ int r = 0; ++ unsigned i, cpages; ++ unsigned max_cpages = min(count, ++ (unsigned)(PAGE_SIZE/sizeof(struct page *))); ++ ++ /* allocate array for page caching change */ ++ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); ++ ++ if (!caching_array) { ++ printk(KERN_ERR TTM_PFX ++ "Unable to allocate table for new pages."); ++ return -ENOMEM; ++ } ++ ++ for (i = 0, cpages = 0; i < count; ++i) { ++ p = alloc_page(gfp_flags); ++ ++ if (!p) { ++ printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i); ++ ++ /* store already allocated pages in the pool after ++ * setting the caching state */ ++ if (cpages) { ++ r = ttm_set_pages_caching(caching_array, ++ cstate, cpages); ++ if (r) ++ ttm_handle_caching_state_failure(pages, ++ ttm_flags, cstate, ++ caching_array, cpages); ++ } ++ r = -ENOMEM; ++ goto out; ++ } ++ ++#ifdef CONFIG_HIGHMEM ++ /* gfp flags of highmem page should never be dma32 so we ++ * we should be fine in such case ++ */ ++ if (!PageHighMem(p)) ++#endif ++ { ++ caching_array[cpages++] = p; ++ if (cpages == max_cpages) { ++ ++ r = ttm_set_pages_caching(caching_array, ++ cstate, cpages); ++ if (r) { ++ ttm_handle_caching_state_failure(pages, ++ ttm_flags, cstate, ++ caching_array, cpages); ++ goto out; ++ } ++ cpages = 0; ++ } ++ 
} ++ ++ list_add(&p->lru, pages); ++ } ++ ++ if (cpages) { ++ r = ttm_set_pages_caching(caching_array, cstate, cpages); ++ if (r) ++ ttm_handle_caching_state_failure(pages, ++ ttm_flags, cstate, ++ caching_array, cpages); ++ } ++out: ++ kfree(caching_array); ++ ++ return r; ++} ++ ++/** ++ * Fill the given pool if there isn't enough pages and requested number of ++ * pages is small. ++ */ ++static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, ++ int ttm_flags, enum ttm_caching_state cstate, unsigned count, ++ unsigned long *irq_flags) ++{ ++ struct page *p; ++ int r; ++ unsigned cpages = 0; ++ /** ++ * Only allow one pool fill operation at a time. ++ * If pool doesn't have enough pages for the allocation new pages are ++ * allocated from outside of pool. ++ */ ++ if (pool->fill_lock) ++ return; ++ ++ pool->fill_lock = true; ++ ++ /* If allocation request is small and there is not enough ++ * pages in pool we fill the pool first */ ++ if (count < _manager.options.small ++ && count > pool->npages) { ++ struct list_head new_pages; ++ unsigned alloc_size = _manager.options.alloc_size; ++ ++ /** ++ * Can't change page caching if in irqsave context. We have to ++ * drop the pool->lock. ++ */ ++ spin_unlock_irqrestore(&pool->lock, *irq_flags); ++ ++ INIT_LIST_HEAD(&new_pages); ++ r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, ++ cstate, alloc_size); ++ spin_lock_irqsave(&pool->lock, *irq_flags); ++ ++ if (!r) { ++ list_splice(&new_pages, &pool->list); ++ ++pool->nrefills; ++ pool->npages += alloc_size; ++ } else { ++ printk(KERN_ERR TTM_PFX ++ "Failed to fill pool (%p).", pool); ++ /* If we have any pages left put them to the pool. 
*/ ++ list_for_each_entry(p, &pool->list, lru) { ++ ++cpages; ++ } ++ list_splice(&new_pages, &pool->list); ++ pool->npages += cpages; ++ } ++ ++ } ++ pool->fill_lock = false; ++} ++ ++/** ++ * Cut count number of pages from the pool and put them to return list ++ * ++ * @return count of pages still to allocate to fill the request. ++ */ ++static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, ++ struct list_head *pages, int ttm_flags, ++ enum ttm_caching_state cstate, unsigned count) ++{ ++ unsigned long irq_flags; ++ struct list_head *p; ++ unsigned i; ++ ++ spin_lock_irqsave(&pool->lock, irq_flags); ++ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); ++ ++ if (count >= pool->npages) { ++ /* take all pages from the pool */ ++ list_splice_init(&pool->list, pages); ++ count -= pool->npages; ++ pool->npages = 0; ++ goto out; ++ } ++ /* find the last pages to include for requested number of pages. Split ++ * pool to begin and halves to reduce search space. */ ++ if (count <= pool->npages/2) { ++ i = 0; ++ list_for_each(p, &pool->list) { ++ if (++i == count) ++ break; ++ } ++ } else { ++ i = pool->npages + 1; ++ list_for_each_prev(p, &pool->list) { ++ if (--i == count) ++ break; ++ } ++ } ++ /* Cut count number of pages from pool */ ++ list_cut_position(pages, &pool->list, p); ++ pool->npages -= count; ++ count = 0; ++out: ++ spin_unlock_irqrestore(&pool->lock, irq_flags); ++ return count; ++} ++ ++/* ++ * On success pages list will hold count number of correctly ++ * cached pages.
++ */ ++int ttm_get_pages(struct list_head *pages, int flags, ++ enum ttm_caching_state cstate, unsigned count) ++{ ++ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); ++ struct page *p = NULL; ++ int gfp_flags = 0; ++ int r; ++ ++ /* set zero flag for page allocation if required */ ++ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) ++ gfp_flags |= __GFP_ZERO; ++ ++ /* No pool for cached pages */ ++ if (pool == NULL) { ++ if (flags & TTM_PAGE_FLAG_DMA32) ++ gfp_flags |= GFP_DMA32; ++ else ++ gfp_flags |= GFP_HIGHUSER; ++ ++ for (r = 0; r < count; ++r) { ++ p = alloc_page(gfp_flags); ++ if (!p) { ++ ++ printk(KERN_ERR TTM_PFX ++ "Unable to allocate page."); ++ return -ENOMEM; ++ } ++ ++ list_add(&p->lru, pages); ++ } ++ return 0; ++ } ++ ++ ++ /* combine zero flag to pool flags */ ++ gfp_flags |= pool->gfp_flags; ++ ++ /* First we take pages from the pool */ ++ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count); ++ ++ /* clear the pages coming from the pool if requested */ ++ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { ++ list_for_each_entry(p, pages, lru) { ++ clear_page(page_address(p)); ++ } ++ } ++ ++ /* If pool didn't have enough pages allocate new one. */ ++ if (count > 0) { ++ /* ttm_alloc_new_pages doesn't reference pool so we can run ++ * multiple requests in parallel. ++ **/ ++ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count); ++ if (r) { ++ /* If there is any pages in the list put them back to ++ * the pool. 
*/ ++ printk(KERN_ERR TTM_PFX ++ "Failed to allocate extra pages " ++ "for large request."); ++ ttm_put_pages(pages, 0, flags, cstate); ++ return r; ++ } ++ } ++ ++ ++ return 0; ++} ++ ++/* Put all pages in pages list to correct pool to wait for reuse */ ++void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, ++ enum ttm_caching_state cstate) ++{ ++ unsigned long irq_flags; ++ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); ++ struct page *p, *tmp; ++ ++ if (pool == NULL) { ++ /* No pool for this memory type so free the pages */ ++ ++ list_for_each_entry_safe(p, tmp, pages, lru) { ++ __free_page(p); ++ } ++ /* Make the pages list empty */ ++ INIT_LIST_HEAD(pages); ++ return; ++ } ++ if (page_count == 0) { ++ list_for_each_entry_safe(p, tmp, pages, lru) { ++ ++page_count; ++ } ++ } ++ ++ spin_lock_irqsave(&pool->lock, irq_flags); ++ list_splice_init(pages, &pool->list); ++ pool->npages += page_count; ++ /* Check that we don't go over the pool limit */ ++ page_count = 0; ++ if (pool->npages > _manager.options.max_size) { ++ page_count = pool->npages - _manager.options.max_size; ++ /* free at least NUM_PAGES_TO_ALLOC number of pages ++ * to reduce calls to set_memory_wb */ ++ if (page_count < NUM_PAGES_TO_ALLOC) ++ page_count = NUM_PAGES_TO_ALLOC; ++ } ++ spin_unlock_irqrestore(&pool->lock, irq_flags); ++ if (page_count) ++ ttm_page_pool_free(pool, page_count); ++} ++ ++static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, ++ char *name) ++{ ++ spin_lock_init(&pool->lock); ++ pool->fill_lock = false; ++ INIT_LIST_HEAD(&pool->list); ++ pool->npages = pool->nfrees = 0; ++ pool->gfp_flags = flags; ++ pool->name = name; ++} ++ ++int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) ++{ ++ int ret; ++ if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) ++ return 0; ++ ++ printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); ++ ++ ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, 
"wc"); ++ ++ ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc"); ++ ++ ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32, ++ "wc dma"); ++ ++ ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32, ++ "uc dma"); ++ ++ _manager.options.max_size = max_pages; ++ _manager.options.small = SMALL_ALLOCATION; ++ _manager.options.alloc_size = NUM_PAGES_TO_ALLOC; ++ ++ kobject_init(&_manager.kobj, &ttm_pool_kobj_type); ++ ret = kobject_add(&_manager.kobj, &glob->kobj, "pool"); ++ if (unlikely(ret != 0)) { ++ kobject_put(&_manager.kobj); ++ return ret; ++ } ++ ++ ttm_pool_mm_shrink_init(&_manager); ++ ++ return 0; ++} ++ ++void ttm_page_alloc_fini() ++{ ++ int i; ++ ++ if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) ++ return; ++ ++ printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); ++ ttm_pool_mm_shrink_fini(&_manager); ++ ++ for (i = 0; i < NUM_POOLS; ++i) ++ ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES); ++ ++ kobject_put(&_manager.kobj); ++} ++ ++int ttm_page_alloc_debugfs(struct seq_file *m, void *data) ++{ ++ struct ttm_page_pool *p; ++ unsigned i; ++ char *h[] = {"pool", "refills", "pages freed", "size"}; ++ if (atomic_read(&_manager.page_alloc_inited) == 0) { ++ seq_printf(m, "No pool allocator running.\n"); ++ return 0; ++ } ++ seq_printf(m, "%6s %12s %13s %8s\n", ++ h[0], h[1], h[2], h[3]); ++ for (i = 0; i < NUM_POOLS; ++i) { ++ p = &_manager.pools[i]; ++ ++ seq_printf(m, "%6s %12ld %13ld %8d\n", ++ p->name, p->nrefills, ++ p->nfrees, p->npages); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(ttm_page_alloc_debugfs); +diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c +index d5fd5b8..a7bab87 100644 +--- a/drivers/gpu/drm/ttm/ttm_tt.c ++++ b/drivers/gpu/drm/ttm/ttm_tt.c +@@ -39,6 +39,7 @@ + #include "ttm/ttm_module.h" + #include "ttm/ttm_bo_driver.h" + #include "ttm/ttm_placement.h" ++#include "ttm/ttm_page_alloc.h" + + static int ttm_tt_swapin(struct ttm_tt *ttm); + +@@ 
-56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm) + ttm->pages = NULL; + } + +-static struct page *ttm_tt_alloc_page(unsigned page_flags) +-{ +- gfp_t gfp_flags = GFP_USER; +- +- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) +- gfp_flags |= __GFP_ZERO; +- +- if (page_flags & TTM_PAGE_FLAG_DMA32) +- gfp_flags |= __GFP_DMA32; +- else +- gfp_flags |= __GFP_HIGHMEM; +- +- return alloc_page(gfp_flags); +-} +- + static void ttm_tt_free_user_pages(struct ttm_tt *ttm) + { + int write; +@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm) + static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) + { + struct page *p; ++ struct list_head h; + struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; + int ret; + + while (NULL == (p = ttm->pages[index])) { +- p = ttm_tt_alloc_page(ttm->page_flags); + +- if (!p) ++ INIT_LIST_HEAD(&h); ++ ++ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1); ++ ++ if (ret != 0) + return NULL; + ++ p = list_first_entry(&h, struct page, lru); ++ + ret = ttm_mem_global_alloc_page(mem_glob, p, false, false); + if (unlikely(ret != 0)) + goto out_err; +@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, + if (ttm->caching_state == c_state) + return 0; + +- if (c_state != tt_cached) { +- ret = ttm_tt_populate(ttm); +- if (unlikely(ret != 0)) +- return ret; ++ if (ttm->state == tt_unpopulated) { ++ /* Change caching but don't populate */ ++ ttm->caching_state = c_state; ++ return 0; + } + + if (ttm->caching_state == tt_cached) +@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching); + static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) + { + int i; ++ unsigned count = 0; ++ struct list_head h; + struct page *cur_page; + struct ttm_backend *be = ttm->be; + ++ INIT_LIST_HEAD(&h); ++ + if (be) + be->func->clear(be); +- (void)ttm_tt_set_caching(ttm, tt_cached); + for (i = 0; i < ttm->num_pages; ++i) { ++ + cur_page = ttm->pages[i]; + ttm->pages[i] = 
NULL; + if (cur_page) { +@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) + "Leaking pages.\n"); + ttm_mem_global_free_page(ttm->glob->mem_glob, + cur_page); +- __free_page(cur_page); ++ list_add(&cur_page->lru, &h); ++ count++; + } + } ++ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state); + ttm->state = tt_unpopulated; + ttm->first_himem_page = ttm->num_pages; + ttm->last_lomem_page = -1; +diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile +index 1a3cb68..4505e17 100644 +--- a/drivers/gpu/drm/vmwgfx/Makefile ++++ b/drivers/gpu/drm/vmwgfx/Makefile +@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm + vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ + vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ + vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ +- vmwgfx_overlay.o ++ vmwgfx_overlay.o vmwgfx_fence.o + + obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +index 825ebe3..c4f5114 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) + int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) + { +- struct vmw_private *dev_priv = +- container_of(bdev, struct vmw_private, bdev); +- + switch (type) { + case TTM_PL_SYSTEM: + /* System memory */ +@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + case TTM_PL_VRAM: + /* "On-card" video ram */ + man->gpu_offset = 0; +- man->io_offset = dev_priv->vram_start; +- man->io_size = dev_priv->vram_size; +- man->flags = TTM_MEMTYPE_FLAG_FIXED | +- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE; +- man->io_addr = NULL; ++ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = 
TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_WC; + break; +@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo) + vmw_dmabuf_gmr_unbind(bo); + } + ++static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ++{ ++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; ++ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); ++ ++ mem->bus.addr = NULL; ++ mem->bus.is_iomem = false; ++ mem->bus.offset = 0; ++ mem->bus.size = mem->num_pages << PAGE_SHIFT; ++ mem->bus.base = 0; ++ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) ++ return -EINVAL; ++ switch (mem->mem_type) { ++ case TTM_PL_SYSTEM: ++ /* System memory */ ++ return 0; ++ case TTM_PL_VRAM: ++ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; ++ mem->bus.base = dev_priv->vram_start; ++ mem->bus.is_iomem = true; ++ break; ++ default: ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ++{ ++} ++ ++static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) ++{ ++ return 0; ++} ++ + /** + * FIXME: We're using the old vmware polling method to sync. + * Do this with fences instead. 
+@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = { + .sync_obj_unref = vmw_sync_obj_unref, + .sync_obj_ref = vmw_sync_obj_ref, + .move_notify = vmw_move_notify, +- .swap_notify = vmw_swap_notify ++ .swap_notify = vmw_swap_notify, ++ .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, ++ .io_mem_reserve = &vmw_ttm_io_mem_reserve, ++ .io_mem_free = &vmw_ttm_io_mem_free, + }; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +index 0c9c081..b793c8c 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +@@ -88,6 +88,9 @@ + #define DRM_IOCTL_VMW_FENCE_WAIT \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ + struct drm_vmw_fence_wait_arg) ++#define DRM_IOCTL_VMW_UPDATE_LAYOUT \ ++ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ ++ struct drm_vmw_update_layout_arg) + + + /** +@@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { + VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, + DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, +- DRM_AUTH | DRM_UNLOCKED) ++ DRM_AUTH | DRM_UNLOCKED), ++ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, ++ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) + }; + + static struct pci_device_id vmw_pci_id_list[] = { +@@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) + goto out_err3; + } + ++ /* Need mmio memory to check for fifo pitchlock cap. 
*/ ++ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && ++ !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && ++ !vmw_fifo_have_pitchlock(dev_priv)) { ++ ret = -ENOSYS; ++ DRM_ERROR("Hardware has no pitchlock\n"); ++ goto out_err4; ++ } ++ + dev_priv->tdev = ttm_object_device_init + (dev_priv->mem_global_ref.object, 12); + +@@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev) + { + struct vmw_private *dev_priv = vmw_priv(dev); + +- DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); +- + unregister_pm_notifier(&dev_priv->pm_nb); + + vmw_fb_close(dev_priv); +@@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev, + { + struct vmw_master *vmaster; + +- DRM_INFO("Master create.\n"); + vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); + if (unlikely(vmaster == NULL)) + return -ENOMEM; +@@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev, + { + struct vmw_master *vmaster = vmw_master(master); + +- DRM_INFO("Master destroy.\n"); + master->driver_priv = NULL; + kfree(vmaster); + } +@@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev, + struct vmw_master *vmaster = vmw_master(file_priv->master); + int ret = 0; + +- DRM_INFO("Master set.\n"); +- + if (active) { + BUG_ON(active != &dev_priv->fbdev_master); + ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); +@@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev, + struct vmw_master *vmaster = vmw_master(file_priv->master); + int ret; + +- DRM_INFO("Master drop.\n"); +- + /** + * Make sure the master doesn't disappear while we have + * it locked. 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +index 356dc93..eaad520 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +@@ -41,12 +41,13 @@ + + #define VMWGFX_DRIVER_DATE "20100209" + #define VMWGFX_DRIVER_MAJOR 1 +-#define VMWGFX_DRIVER_MINOR 0 ++#define VMWGFX_DRIVER_MINOR 2 + #define VMWGFX_DRIVER_PATCHLEVEL 0 + #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 + #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) + #define VMWGFX_MAX_RELOCATIONS 2048 + #define VMWGFX_MAX_GMRS 2048 ++#define VMWGFX_MAX_DISPLAYS 16 + + struct vmw_fpriv { + struct drm_master *locked_master; +@@ -102,6 +103,13 @@ struct vmw_surface { + struct vmw_cursor_snooper snooper; + }; + ++struct vmw_fence_queue { ++ struct list_head head; ++ struct timespec lag; ++ struct timespec lag_time; ++ spinlock_t lock; ++}; ++ + struct vmw_fifo_state { + unsigned long reserved_size; + __le32 *dynamic_buffer; +@@ -115,6 +123,7 @@ struct vmw_fifo_state { + uint32_t capabilities; + struct mutex fifo_mutex; + struct rw_semaphore rwsem; ++ struct vmw_fence_queue fence_queue; + }; + + struct vmw_relocation { +@@ -144,6 +153,14 @@ struct vmw_master { + struct ttm_lock lock; + }; + ++struct vmw_vga_topology_state { ++ uint32_t width; ++ uint32_t height; ++ uint32_t primary; ++ uint32_t pos_x; ++ uint32_t pos_y; ++}; ++ + struct vmw_private { + struct ttm_bo_device bdev; + struct ttm_bo_global_ref bo_global_ref; +@@ -171,14 +188,19 @@ struct vmw_private { + * VGA registers. + */ + ++ struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS]; + uint32_t vga_width; + uint32_t vga_height; + uint32_t vga_depth; + uint32_t vga_bpp; + uint32_t vga_pseudo; + uint32_t vga_red_mask; +- uint32_t vga_blue_mask; + uint32_t vga_green_mask; ++ uint32_t vga_blue_mask; ++ uint32_t vga_bpl; ++ uint32_t vga_pitchlock; ++ ++ uint32_t num_displays; + + /* + * Framebuffer info. 
+@@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, + extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); + extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); + extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); ++extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); + + /** + * TTM glue - vmwgfx_ttm_glue.c +@@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv, + uint32_t sequence, + bool interruptible, + unsigned long timeout); ++extern void vmw_update_sequence(struct vmw_private *dev_priv, ++ struct vmw_fifo_state *fifo_state); ++ ++ ++/** ++ * Rudimentary fence objects currently used only for throttling - ++ * vmwgfx_fence.c ++ */ ++ ++extern void vmw_fence_queue_init(struct vmw_fence_queue *queue); ++extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue); ++extern int vmw_fence_push(struct vmw_fence_queue *queue, ++ uint32_t sequence); ++extern int vmw_fence_pull(struct vmw_fence_queue *queue, ++ uint32_t signaled_sequence); ++extern int vmw_wait_lag(struct vmw_private *dev_priv, ++ struct vmw_fence_queue *queue, uint32_t us); + + /** + * Kernel framebuffer - vmwgfx_fb.c +@@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, + struct ttm_object_file *tfile, + struct ttm_buffer_object *bo, + SVGA3dCmdHeader *header); ++void vmw_kms_write_svga(struct vmw_private *vmw_priv, ++ unsigned width, unsigned height, unsigned pitch, ++ unsigned bbp, unsigned depth); ++int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); + + /** + * Overlay control - vmwgfx_overlay.c +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index 0897359..8e39685 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, 
+ * Put BO in VRAM, only if there is space. + */ + +- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false); ++ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false); + if (unlikely(ret == -ERESTARTSYS)) + return ret; + +@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, + * previous contents. + */ + +- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); ++ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); + return ret; + } + +@@ -644,6 +644,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, + ret = copy_from_user(cmd, user_cmd, arg->command_size); + + if (unlikely(ret != 0)) { ++ ret = -EFAULT; + DRM_ERROR("Failed copying commands.\n"); + goto out_commit; + } +@@ -669,6 +670,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, + goto out_err; + + vmw_apply_relocations(sw_context); ++ ++ if (arg->throttle_us) { ++ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue, ++ arg->throttle_us); ++ ++ if (unlikely(ret != 0)) ++ goto out_err; ++ } ++ + vmw_fifo_commit(dev_priv, arg->command_size); + + ret = vmw_fifo_send_fence(dev_priv, &sequence); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +index a933670..b0866f0 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, + return -EINVAL; + } + +- /* without multimon its hard to resize */ +- if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) && +- (var->xres != par->max_width || +- var->yres != par->max_height)) { +- DRM_ERROR("Tried to resize, but we don't have multimon\n"); ++ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && ++ (var->xoffset != 0 || var->yoffset != 0)) { ++ DRM_ERROR("Can not handle panning without display topology\n"); + return -EINVAL; + } + +- if (var->xres > par->max_width || +- var->yres > par->max_height) { ++ if 
((var->xoffset + var->xres) > par->max_width || ++ (var->yoffset + var->yres) > par->max_height) { + DRM_ERROR("Requested geom can not fit in framebuffer\n"); + return -EINVAL; + } +@@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info) + struct vmw_fb_par *par = info->par; + struct vmw_private *vmw_priv = par->vmw_priv; + +- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { +- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); +- +- vmw_write(vmw_priv, SVGA_REG_ENABLE, 1); +- vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width); +- vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height); +- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp); +- vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth); +- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); +- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); +- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); +- ++ vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, ++ info->fix.line_length, ++ par->bpp, par->depth); ++ if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { + /* TODO check if pitch and offset changes */ +- + vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); +@@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info) + vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); +- } else { +- vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres); +- 
vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres); +- +- /* TODO check if pitch and offset changes */ + } + ++ /* This is really helpful since if this fails the user ++ * can probably not see anything on the screen. ++ */ ++ WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0); ++ + return 0; + } + +@@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv) + unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; + int ret; + ++ /* XXX These shouldn't be hardcoded. */ + initial_width = 800; + initial_height = 600; + + fb_bbp = 32; + fb_depth = 24; + +- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { +- fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); +- fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); +- } else { +- fb_width = min(vmw_priv->fb_max_width, initial_width); +- fb_height = min(vmw_priv->fb_max_height, initial_height); +- } ++ /* XXX As shouldn't these be as well. */ ++ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); ++ fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); + + initial_width = min(fb_width, initial_width); + initial_height = min(fb_height, initial_height); + +- vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width); +- vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height); +- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp); +- vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth); +- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); +- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); +- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); +- +- fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE); ++ fb_pitch = fb_width * fb_bbp / 8; ++ fb_size = fb_pitch * fb_height; + fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); +- fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE); +- +- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH)); +- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT)); +- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH)); +- DRM_DEBUG("height %u\n", 
vmw_read(vmw_priv, SVGA_REG_HEIGHT)); +- DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL)); +- DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH)); +- DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE)); +- DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK)); +- DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK)); +- DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK)); +- DRM_DEBUG("fb_offset 0x%08x\n", fb_offset); +- DRM_DEBUG("fb_pitch %u\n", fb_pitch); +- DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024); + + info = framebuffer_alloc(sizeof(*par), device); + if (!info) +@@ -559,8 +516,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv) + info->pixmap.scan_align = 1; + #endif + +- info->aperture_base = vmw_priv->vram_start; +- info->aperture_size = vmw_priv->vram_size; ++ info->apertures = alloc_apertures(1); ++ if (!info->apertures) { ++ ret = -ENOMEM; ++ goto err_aper; ++ } ++ info->apertures->ranges[0].base = vmw_priv->vram_start; ++ info->apertures->ranges[0].size = vmw_priv->vram_size; + + /* + * Dirty & Deferred IO +@@ -580,6 +542,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) + + err_defio: + fb_deferred_io_cleanup(info); ++err_aper: + ttm_bo_kunmap(&par->map); + err_unref: + ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); +@@ -628,7 +591,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, + if (unlikely(ret != 0)) + return ret; + +- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false); ++ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); + ttm_bo_unreserve(bo); + + return ret; +@@ -652,7 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, + if (unlikely(ret != 0)) + goto err_unlock; + +- ret = ttm_bo_validate(bo, &ne_placement, false, false); ++ ret = ttm_bo_validate(bo, &ne_placement, false, false, false); ++ ++ /* Could probably bug on */ ++ WARN_ON(bo->offset != 0); ++ + ttm_bo_unreserve(bo); + err_unlock: 
+ ttm_write_unlock(&vmw_priv->active_master->lock); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +new file mode 100644 +index 0000000..61eacc1 +--- /dev/null ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +@@ -0,0 +1,173 @@ ++/************************************************************************** ++ * ++ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ **************************************************************************/ ++ ++ ++#include "vmwgfx_drv.h" ++ ++struct vmw_fence { ++ struct list_head head; ++ uint32_t sequence; ++ struct timespec submitted; ++}; ++ ++void vmw_fence_queue_init(struct vmw_fence_queue *queue) ++{ ++ INIT_LIST_HEAD(&queue->head); ++ queue->lag = ns_to_timespec(0); ++ getrawmonotonic(&queue->lag_time); ++ spin_lock_init(&queue->lock); ++} ++ ++void vmw_fence_queue_takedown(struct vmw_fence_queue *queue) ++{ ++ struct vmw_fence *fence, *next; ++ ++ spin_lock(&queue->lock); ++ list_for_each_entry_safe(fence, next, &queue->head, head) { ++ kfree(fence); ++ } ++ spin_unlock(&queue->lock); ++} ++ ++int vmw_fence_push(struct vmw_fence_queue *queue, ++ uint32_t sequence) ++{ ++ struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); ++ ++ if (unlikely(!fence)) ++ return -ENOMEM; ++ ++ fence->sequence = sequence; ++ getrawmonotonic(&fence->submitted); ++ spin_lock(&queue->lock); ++ list_add_tail(&fence->head, &queue->head); ++ spin_unlock(&queue->lock); ++ ++ return 0; ++} ++ ++int vmw_fence_pull(struct vmw_fence_queue *queue, ++ uint32_t signaled_sequence) ++{ ++ struct vmw_fence *fence, *next; ++ struct timespec now; ++ bool updated = false; ++ ++ spin_lock(&queue->lock); ++ getrawmonotonic(&now); ++ ++ if (list_empty(&queue->head)) { ++ queue->lag = ns_to_timespec(0); ++ queue->lag_time = now; ++ updated = true; ++ goto out_unlock; ++ } ++ ++ list_for_each_entry_safe(fence, next, &queue->head, head) { ++ if (signaled_sequence - fence->sequence > (1 << 30)) ++ continue; ++ ++ queue->lag = timespec_sub(now, fence->submitted); ++ queue->lag_time = now; ++ updated = true; ++ list_del(&fence->head); ++ kfree(fence); ++ } ++ ++out_unlock: ++ spin_unlock(&queue->lock); ++ ++ return (updated) ? 
0 : -EBUSY; ++} ++ ++static struct timespec vmw_timespec_add(struct timespec t1, ++ struct timespec t2) ++{ ++ t1.tv_sec += t2.tv_sec; ++ t1.tv_nsec += t2.tv_nsec; ++ if (t1.tv_nsec >= 1000000000L) { ++ t1.tv_sec += 1; ++ t1.tv_nsec -= 1000000000L; ++ } ++ ++ return t1; ++} ++ ++static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue) ++{ ++ struct timespec now; ++ ++ spin_lock(&queue->lock); ++ getrawmonotonic(&now); ++ queue->lag = vmw_timespec_add(queue->lag, ++ timespec_sub(now, queue->lag_time)); ++ queue->lag_time = now; ++ spin_unlock(&queue->lock); ++ return queue->lag; ++} ++ ++ ++static bool vmw_lag_lt(struct vmw_fence_queue *queue, ++ uint32_t us) ++{ ++ struct timespec lag, cond; ++ ++ cond = ns_to_timespec((s64) us * 1000); ++ lag = vmw_fifo_lag(queue); ++ return (timespec_compare(&lag, &cond) < 1); ++} ++ ++int vmw_wait_lag(struct vmw_private *dev_priv, ++ struct vmw_fence_queue *queue, uint32_t us) ++{ ++ struct vmw_fence *fence; ++ uint32_t sequence; ++ int ret; ++ ++ while (!vmw_lag_lt(queue, us)) { ++ spin_lock(&queue->lock); ++ if (list_empty(&queue->head)) ++ sequence = atomic_read(&dev_priv->fence_seq); ++ else { ++ fence = list_first_entry(&queue->head, ++ struct vmw_fence, head); ++ sequence = fence->sequence; ++ } ++ spin_unlock(&queue->lock); ++ ++ ret = vmw_wait_fence(dev_priv, false, sequence, true, ++ 3*HZ); ++ ++ if (unlikely(ret != 0)) ++ return ret; ++ ++ (void) vmw_fence_pull(queue, sequence); ++ } ++ return 0; ++} ++ ++ +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index 39d43a0..e6a1eb7 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + uint32_t fifo_min, hwversion; + ++ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) ++ return false; ++ + fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); + if (fifo_min <= 
SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) + return false; +@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) + return true; + } + ++bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) ++{ ++ __le32 __iomem *fifo_mem = dev_priv->mmio_virt; ++ uint32_t caps; ++ ++ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) ++ return false; ++ ++ caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); ++ if (caps & SVGA_FIFO_CAP_PITCHLOCK) ++ return true; ++ ++ return false; ++} ++ + int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) + { + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; +@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) + + atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); + iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); +- ++ vmw_fence_queue_init(&fifo->fence_queue); + return vmw_fifo_send_fence(dev_priv, &dummy); + out_err: + vfree(fifo->static_buffer); +@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) + dev_priv->enable_state); + + mutex_unlock(&dev_priv->hw_mutex); ++ vmw_fence_queue_takedown(&fifo->fence_queue); + + if (likely(fifo->last_buffer != NULL)) { + vfree(fifo->last_buffer); +@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) + fifo_state->last_buffer_add = true; + vmw_fifo_commit(dev_priv, bytes); + fifo_state->last_buffer_add = false; ++ (void) vmw_fence_push(&fifo_state->fence_queue, *sequence); ++ vmw_update_sequence(dev_priv, fifo_state); + + out_err: + return ret; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +index 4d7cb53..e92298a 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) + return (busy == 0); + } + ++void 
vmw_update_sequence(struct vmw_private *dev_priv, ++ struct vmw_fifo_state *fifo_state) ++{ ++ __le32 __iomem *fifo_mem = dev_priv->mmio_virt; ++ ++ uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); ++ ++ if (dev_priv->last_read_sequence != sequence) { ++ dev_priv->last_read_sequence = sequence; ++ vmw_fence_pull(&fifo_state->fence_queue, sequence); ++ } ++} + + bool vmw_fence_signaled(struct vmw_private *dev_priv, + uint32_t sequence) + { +- __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + struct vmw_fifo_state *fifo_state; + bool ret; + + if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) + return true; + +- dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); ++ fifo_state = &dev_priv->fifo; ++ vmw_update_sequence(dev_priv, fifo_state); + if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) + return true; + +- fifo_state = &dev_priv->fifo; + if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && + vmw_fifo_idle(dev_priv, sequence)) + return true; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index 31f9afe..f1d6261 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -30,6 +30,8 @@ + /* Might need a hrtimer here? */ + #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? 
HZ / 60 : 1) + ++static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb); ++static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb); + + void vmw_display_unit_cleanup(struct vmw_display_unit *du) + { +@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, + struct vmw_framebuffer_surface { + struct vmw_framebuffer base; + struct vmw_surface *surface; ++ struct vmw_dma_buffer *buffer; + struct delayed_work d_work; + struct mutex work_lock; + bool present_fs; +@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, + vfbs->base.base.depth = 24; + vfbs->base.base.width = width; + vfbs->base.base.height = height; +- vfbs->base.pin = NULL; +- vfbs->base.unpin = NULL; ++ vfbs->base.pin = &vmw_surface_dmabuf_pin; ++ vfbs->base.unpin = &vmw_surface_dmabuf_unpin; + vfbs->surface = surface; + mutex_init(&vfbs->work_lock); + INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); +@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { + .create_handle = vmw_framebuffer_create_handle, + }; + ++static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) ++{ ++ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); ++ struct vmw_framebuffer_surface *vfbs = ++ vmw_framebuffer_to_vfbs(&vfb->base); ++ unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height; ++ int ret; ++ ++ vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL); ++ if (unlikely(vfbs->buffer == NULL)) ++ return -ENOMEM; ++ ++ vmw_overlay_pause_all(dev_priv); ++ ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size, ++ &vmw_vram_ne_placement, ++ false, &vmw_dmabuf_bo_free); ++ vmw_overlay_resume_all(dev_priv); ++ ++ return ret; ++} ++ ++static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) ++{ ++ struct ttm_buffer_object *bo; ++ struct vmw_framebuffer_surface *vfbs = ++ vmw_framebuffer_to_vfbs(&vfb->base); ++ ++ bo = &vfbs->buffer->base; ++ ttm_bo_unref(&bo); ++ 
vfbs->buffer = NULL; ++ ++ return 0; ++} ++ + static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) + { + struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); +@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) + vmw_framebuffer_to_vfbd(&vfb->base); + int ret; + ++ + vmw_overlay_pause_all(dev_priv); + + ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); + +- if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { +- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); +- +- vmw_write(dev_priv, SVGA_REG_ENABLE, 1); +- vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width); +- vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height); +- vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel); +- vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth); +- vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000); +- vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); +- vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff); +- } else +- WARN_ON(true); +- + vmw_overlay_resume_all(dev_priv); + ++ WARN_ON(ret != 0); ++ + return 0; + } + +@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, + + /* XXX get the first 3 from the surface info */ + vfbd->base.base.bits_per_pixel = 32; +- vfbd->base.base.pitch = width * 32 / 4; ++ vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; + vfbd->base.base.depth = 24; + vfbd->base.base.width = width; + vfbd->base.base.height = height; +@@ -752,14 +771,8 @@ err_not_scanout: + return NULL; + } + +-static int vmw_kms_fb_changed(struct drm_device *dev) +-{ +- 
return 0; +-} +- + static struct drm_mode_config_funcs vmw_kms_funcs = { + .fb_create = vmw_kms_fb_create, +- .fb_changed = vmw_kms_fb_changed, + }; + + int vmw_kms_init(struct vmw_private *dev_priv) +@@ -771,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) + dev->mode_config.funcs = &vmw_kms_funcs; + dev->mode_config.min_width = 1; + dev->mode_config.min_height = 1; +- dev->mode_config.max_width = dev_priv->fb_max_width; +- dev->mode_config.max_height = dev_priv->fb_max_height; ++ /* assumed largest fb size */ ++ dev->mode_config.max_width = 8192; ++ dev->mode_config.max_height = 8192; + + ret = vmw_kms_init_legacy_display_system(dev_priv); + +@@ -832,49 +846,140 @@ out: + return ret; + } + ++void vmw_kms_write_svga(struct vmw_private *vmw_priv, ++ unsigned width, unsigned height, unsigned pitch, ++ unsigned bbp, unsigned depth) ++{ ++ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) ++ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); ++ else if (vmw_fifo_have_pitchlock(vmw_priv)) ++ iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); ++ vmw_write(vmw_priv, SVGA_REG_WIDTH, width); ++ vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); ++ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp); ++ vmw_write(vmw_priv, SVGA_REG_DEPTH, depth); ++ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); ++ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); ++ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); ++} ++ + int vmw_kms_save_vga(struct vmw_private *vmw_priv) + { +- /* +- * setup a single multimon monitor with the size +- * of 0x0, this stops the UI from resizing when we +- * change the framebuffer size +- */ +- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { +- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); +- vmw_write(vmw_priv, 
SVGA_REG_DISPLAY_WIDTH, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); +- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); +- } ++ struct vmw_vga_topology_state *save; ++ uint32_t i; + + vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); + vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); +- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); + vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); ++ vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); + vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); + vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); +- vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); + vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); ++ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); ++ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) ++ vmw_priv->vga_pitchlock = ++ vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); ++ else if (vmw_fifo_have_pitchlock(vmw_priv)) ++ vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + ++ SVGA_FIFO_PITCHLOCK); ++ ++ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) ++ return 0; + ++ vmw_priv->num_displays = vmw_read(vmw_priv, ++ SVGA_REG_NUM_GUEST_DISPLAYS); ++ ++ for (i = 0; i < vmw_priv->num_displays; ++i) { ++ save = &vmw_priv->vga_save[i]; ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); ++ save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY); ++ save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X); ++ save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y); ++ save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); ++ save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); ++ } + return 0; + } + + int vmw_kms_restore_vga(struct vmw_private *vmw_priv) + { ++ struct vmw_vga_topology_state *save; ++ uint32_t i; ++ + vmw_write(vmw_priv, SVGA_REG_WIDTH, 
vmw_priv->vga_width); + vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); +- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); + vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); ++ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); + vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); + vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); + vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); + vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); ++ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) ++ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, ++ vmw_priv->vga_pitchlock); ++ else if (vmw_fifo_have_pitchlock(vmw_priv)) ++ iowrite32(vmw_priv->vga_pitchlock, ++ vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); ++ ++ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) ++ return 0; + +- /* TODO check for multimon */ +- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); ++ for (i = 0; i < vmw_priv->num_displays; ++i) { ++ save = &vmw_priv->vga_save[i]; ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary); ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x); ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y); ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width); ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height); ++ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); ++ } + + return 0; + } ++ ++int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct vmw_private *dev_priv = vmw_priv(dev); ++ struct drm_vmw_update_layout_arg *arg = ++ (struct drm_vmw_update_layout_arg *)data; ++ struct vmw_master *vmaster = vmw_master(file_priv->master); ++ void __user *user_rects; ++ struct drm_vmw_rect *rects; ++ unsigned rects_size; ++ int ret; ++ ++ ret = ttm_read_lock(&vmaster->lock, true); ++ if (unlikely(ret != 0)) ++ 
return ret; ++ ++ if (!arg->num_outputs) { ++ struct drm_vmw_rect def_rect = {0, 0, 800, 600}; ++ vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect); ++ goto out_unlock; ++ } ++ ++ rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); ++ rects = kzalloc(rects_size, GFP_KERNEL); ++ if (unlikely(!rects)) { ++ ret = -ENOMEM; ++ goto out_unlock; ++ } ++ ++ user_rects = (void __user *)(unsigned long)arg->rects; ++ ret = copy_from_user(rects, user_rects, rects_size); ++ if (unlikely(ret != 0)) { ++ DRM_ERROR("Failed to get rects.\n"); ++ goto out_free; ++ } ++ ++ vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects); ++ ++out_free: ++ kfree(rects); ++out_unlock: ++ ttm_read_unlock(&vmaster->lock); ++ return ret; ++} +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +index 8b95249..8a398a0 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +@@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); + + /* +- * Legacy display unit functions - vmwgfx_ldu.h ++ * Legacy display unit functions - vmwgfx_ldu.c + */ + int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); + int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); ++int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, ++ struct drm_vmw_rect *rects); + + #endif +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +index 9089159..cfaf690 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +@@ -38,6 +38,7 @@ struct vmw_legacy_display { + struct list_head active; + + unsigned num_active; ++ unsigned last_num_active; + + struct vmw_framebuffer *fb; + }; +@@ -48,9 +49,12 @@ struct vmw_legacy_display { + struct vmw_legacy_display_unit { + struct vmw_display_unit base; + +- struct list_head active; ++ 
unsigned pref_width; ++ unsigned pref_height; ++ bool pref_active; ++ struct drm_display_mode *pref_mode; + +- unsigned unit; ++ struct list_head active; + }; + + static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) +@@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) + { + struct vmw_legacy_display *lds = dev_priv->ldu_priv; + struct vmw_legacy_display_unit *entry; +- struct drm_crtc *crtc; ++ struct drm_framebuffer *fb = NULL; ++ struct drm_crtc *crtc = NULL; + int i = 0; + +- /* to stop the screen from changing size on resize */ +- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); +- for (i = 0; i < lds->num_active; i++) { +- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); +- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); ++ /* If there is no display topology the host just assumes ++ * that the guest will set the same layout as the host. 
++ */ ++ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) { ++ int w = 0, h = 0; ++ list_for_each_entry(entry, &lds->active, active) { ++ crtc = &entry->base.crtc; ++ w = max(w, crtc->x + crtc->mode.hdisplay); ++ h = max(h, crtc->y + crtc->mode.vdisplay); ++ i++; ++ } ++ ++ if (crtc == NULL) ++ return 0; ++ fb = entry->base.crtc.fb; ++ ++ vmw_kms_write_svga(dev_priv, w, h, fb->pitch, ++ fb->bits_per_pixel, fb->depth); ++ ++ return 0; + } + +- /* Now set the mode */ +- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); ++ if (!list_empty(&lds->active)) { ++ entry = list_entry(lds->active.next, typeof(*entry), active); ++ fb = entry->base.crtc.fb; ++ ++ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch, ++ fb->bits_per_pixel, fb->depth); ++ } ++ ++ /* Make sure we always show something. */ ++ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, ++ lds->num_active ? lds->num_active : 1); ++ + i = 0; + list_for_each_entry(entry, &lds->active, active) { + crtc = &entry->base.crtc; +@@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) + i++; + } + ++ BUG_ON(i != lds->num_active); ++ ++ lds->last_num_active = lds->num_active; ++ + return 0; + } + +@@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv, + if (list_empty(&ldu->active)) + return 0; + ++ /* Must init otherwise list_empty(&ldu->active) will not work. 
*/ + list_del_init(&ldu->active); + if (--(ld->num_active) == 0) { + BUG_ON(!ld->fb); +@@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv, + struct vmw_legacy_display_unit *entry; + struct list_head *at; + ++ BUG_ON(!ld->num_active && ld->fb); ++ if (vfb != ld->fb) { ++ if (ld->fb && ld->fb->unpin) ++ ld->fb->unpin(ld->fb); ++ if (vfb->pin) ++ vfb->pin(vfb); ++ ld->fb = vfb; ++ } ++ + if (!list_empty(&ldu->active)) + return 0; + + at = &ld->active; + list_for_each_entry(entry, &ld->active, active) { +- if (entry->unit > ldu->unit) ++ if (entry->base.unit > ldu->base.unit) + break; + + at = &entry->active; + } + + list_add(&ldu->active, at); +- if (ld->num_active++ == 0) { +- BUG_ON(ld->fb); +- if (vfb->pin) +- vfb->pin(vfb); +- ld->fb = vfb; +- } ++ ++ ld->num_active++; + + return 0; + } +@@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) + + /* ldu only supports one fb active at the time */ + if (dev_priv->ldu_priv->fb && vfb && ++ !(dev_priv->ldu_priv->num_active == 1 && ++ !list_empty(&ldu->active)) && + dev_priv->ldu_priv->fb != vfb) { + DRM_ERROR("Multiple framebuffers not supported\n"); + return -EINVAL; +@@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) + static enum drm_connector_status + vmw_ldu_connector_detect(struct drm_connector *connector) + { +- /* XXX vmwctrl should control connection status */ +- if (vmw_connector_to_ldu(connector)->base.unit == 0) ++ if (vmw_connector_to_ldu(connector)->pref_active) + return connector_status_connected; + return connector_status_disconnected; + } +@@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { + 752, 800, 0, 480, 489, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 800x600@60Hz */ +- { DRM_MODE("800x600", +- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +- 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, +- 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) 
}, ++ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, ++ 968, 1056, 0, 600, 601, 605, 628, 0, ++ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1024x768@60Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, + 1184, 1344, 0, 768, 771, 777, 806, 0, +@@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { + static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, + uint32_t max_width, uint32_t max_height) + { ++ struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode = NULL; ++ struct drm_display_mode prefmode = { DRM_MODE("preferred", ++ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) ++ }; + int i; + ++ /* Add preferred mode */ ++ { ++ mode = drm_mode_duplicate(dev, &prefmode); ++ if (!mode) ++ return 0; ++ mode->hdisplay = ldu->pref_width; ++ mode->vdisplay = ldu->pref_height; ++ mode->vrefresh = drm_mode_vrefresh(mode); ++ drm_mode_probed_add(connector, mode); ++ ++ if (ldu->pref_mode) { ++ list_del_init(&ldu->pref_mode->head); ++ drm_mode_destroy(dev, ldu->pref_mode); ++ } ++ ++ ldu->pref_mode = mode; ++ } ++ + for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { + if (vmw_ldu_connector_builtin[i].hdisplay > max_width || + vmw_ldu_connector_builtin[i].vdisplay > max_height) +@@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) + if (!ldu) + return -ENOMEM; + +- ldu->unit = unit; ++ ldu->base.unit = unit; + crtc = &ldu->base.crtc; + encoder = &ldu->base.encoder; + connector = &ldu->base.connector; + ++ INIT_LIST_HEAD(&ldu->active); ++ ++ ldu->pref_active = (unit == 0); ++ ldu->pref_width = 800; ++ ldu->pref_height = 600; ++ ldu->pref_mode = NULL; ++ + drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); +- /* Initial status */ +- 
if (unit == 0) +- connector->status = connector_status_connected; +- else +- connector->status = connector_status_disconnected; ++ connector->status = vmw_ldu_connector_detect(connector); + + drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, + DRM_MODE_ENCODER_LVDS); +@@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) + encoder->possible_crtcs = (1 << unit); + encoder->possible_clones = 0; + +- INIT_LIST_HEAD(&ldu->active); +- + drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); + + drm_connector_attach_property(connector, +@@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) + + INIT_LIST_HEAD(&dev_priv->ldu_priv->active); + dev_priv->ldu_priv->num_active = 0; ++ dev_priv->ldu_priv->last_num_active = 0; + dev_priv->ldu_priv->fb = NULL; + + drm_mode_create_dirty_info_property(dev_priv->dev); + + vmw_ldu_init(dev_priv, 0); +- vmw_ldu_init(dev_priv, 1); +- vmw_ldu_init(dev_priv, 2); +- vmw_ldu_init(dev_priv, 3); +- vmw_ldu_init(dev_priv, 4); +- vmw_ldu_init(dev_priv, 5); +- vmw_ldu_init(dev_priv, 6); +- vmw_ldu_init(dev_priv, 7); ++ /* for old hardware without multimon only enable one display */ ++ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { ++ vmw_ldu_init(dev_priv, 1); ++ vmw_ldu_init(dev_priv, 2); ++ vmw_ldu_init(dev_priv, 3); ++ vmw_ldu_init(dev_priv, 4); ++ vmw_ldu_init(dev_priv, 5); ++ vmw_ldu_init(dev_priv, 6); ++ vmw_ldu_init(dev_priv, 7); ++ } + + return 0; + } +@@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) + + return 0; + } ++ ++int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, ++ struct drm_vmw_rect *rects) ++{ ++ struct drm_device *dev = dev_priv->dev; ++ struct vmw_legacy_display_unit *ldu; ++ struct drm_connector *con; ++ int i; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ ++#if 0 ++ DRM_INFO("%s: new layout ", __func__); ++ for (i = 0; i < (int)num; i++) ++ DRM_INFO("(%i, %i %ux%u) ", rects[i].x, 
rects[i].y, ++ rects[i].w, rects[i].h); ++ DRM_INFO("\n"); ++#else ++ (void)i; ++#endif ++ ++ list_for_each_entry(con, &dev->mode_config.connector_list, head) { ++ ldu = vmw_connector_to_ldu(con); ++ if (num > ldu->base.unit) { ++ ldu->pref_width = rects[ldu->base.unit].w; ++ ldu->pref_height = rects[ldu->base.unit].h; ++ ldu->pref_active = true; ++ } else { ++ ldu->pref_width = 800; ++ ldu->pref_height = 600; ++ ldu->pref_active = false; ++ } ++ con->status = vmw_ldu_connector_detect(con); ++ } ++ ++ mutex_unlock(&dev->mode_config.mutex); ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +index 5b6eabe..df2036e 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, + if (pin) + overlay_placement = &vmw_vram_ne_placement; + +- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false); ++ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false); + + ttm_bo_unreserve(bo); + +@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, + if (stream->buf != buf) + stream->buf = vmw_dmabuf_reference(buf); + stream->saved = *arg; ++ /* stream is no longer stopped/paused */ ++ stream->paused = false; + + return 0; + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +index f8fbbc6..8612378 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +@@ -597,8 +597,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, + + ret = copy_from_user(srf->sizes, user_sizes, + srf->num_sizes * sizeof(*srf->sizes)); +- if (unlikely(ret != 0)) ++ if (unlikely(ret != 0)) { ++ ret = -EFAULT; + goto out_err1; ++ } + + if (srf->scanout && + srf->num_sizes == 1 && +@@ -697,9 +699,11 @@ int vmw_surface_reference_ioctl(struct drm_device 
*dev, void *data, + if (user_sizes) + ret = copy_to_user(user_sizes, srf->sizes, + srf->num_sizes * sizeof(*srf->sizes)); +- if (unlikely(ret != 0)) ++ if (unlikely(ret != 0)) { + DRM_ERROR("copy_to_user failed %p %u\n", + user_sizes, srf->num_sizes); ++ ret = -EFAULT; ++ } + out_bad_resource: + out_no_reference: + ttm_base_object_unref(&base); +diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig +index 61ab4da..8d0e31a 100644 +--- a/drivers/gpu/vga/Kconfig ++++ b/drivers/gpu/vga/Kconfig +@@ -18,12 +18,12 @@ config VGA_ARB_MAX_GPUS + multiple GPUS. The overhead for each GPU is very small. + + config VGA_SWITCHEROO +- bool "Laptop Hybrid Grapics - GPU switching support" ++ bool "Laptop Hybrid Graphics - GPU switching support" + depends on X86 + depends on ACPI + help +- Many laptops released in 2008/9/10 have two gpus with a multiplxer ++ Many laptops released in 2008/9/10 have two GPUs with a multiplexer + to switch between them. This adds support for dynamic switching when + X isn't running and delayed switching until the next logoff. This +- features is called hybrid graphics, ATI PowerXpress, and Nvidia ++ feature is called hybrid graphics, ATI PowerXpress, and Nvidia + HybridPower. +diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c +index 441e38c..b87569e 100644 +--- a/drivers/gpu/vga/vgaarb.c ++++ b/drivers/gpu/vga/vgaarb.c +@@ -1,12 +1,32 @@ + /* +- * vgaarb.c ++ * vgaarb.c: Implements the VGA arbitration. For details refer to ++ * Documentation/vgaarbiter.txt ++ * + * + * (C) Copyright 2005 Benjamin Herrenschmidt + * (C) Copyright 2007 Paulo R. Zanoni + * (C) Copyright 2007, 2009 Tiago Vignatti + * +- * Implements the VGA arbitration. 
For details refer to +- * Documentation/vgaarbiter.txt ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS ++ * IN THE SOFTWARE. 
++ * + */ + + #include +@@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, + (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) + rsrc |= VGA_RSRC_LEGACY_MEM; + +- pr_devel("%s: %d\n", __func__, rsrc); +- pr_devel("%s: owns: %d\n", __func__, vgadev->owns); ++ pr_debug("%s: %d\n", __func__, rsrc); ++ pr_debug("%s: owns: %d\n", __func__, vgadev->owns); + + /* Check what resources we need to acquire */ + wants = rsrc & ~vgadev->owns; +@@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) + { + unsigned int old_locks = vgadev->locks; + +- pr_devel("%s\n", __func__); ++ pr_debug("%s\n", __func__); + + /* Update our counters, and account for equivalent legacy resources + * if we decode them +@@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, + else + vga_decode_count--; + } ++ pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); + } + + void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) +@@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + curr_pos += 5; + remaining -= 5; + +- pr_devel("client 0x%p called 'lock'\n", priv); ++ pr_debug("client 0x%p called 'lock'\n", priv); + + if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { + ret_val = -EPROTO; +@@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + curr_pos += 7; + remaining -= 7; + +- pr_devel("client 0x%p called 'unlock'\n", priv); ++ pr_debug("client 0x%p called 'unlock'\n", priv); + + if (strncmp(curr_pos, "all", 3) == 0) + io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; +@@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + curr_pos += 8; + remaining -= 8; + +- pr_devel("client 0x%p called 'trylock'\n", priv); ++ pr_debug("client 0x%p called 'trylock'\n", priv); + + if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { + 
ret_val = -EPROTO; +@@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + + curr_pos += 7; + remaining -= 7; +- pr_devel("client 0x%p called 'target'\n", priv); ++ pr_debug("client 0x%p called 'target'\n", priv); + /* if target is default */ + if (!strncmp(curr_pos, "default", 7)) + pdev = pci_dev_get(vga_default_device()); +@@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + ret_val = -EPROTO; + goto done; + } +- pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, ++ pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, + domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + pbus = pci_find_bus(domain, bus); +- pr_devel("vgaarb: pbus %p\n", pbus); ++ pr_debug("vgaarb: pbus %p\n", pbus); + if (pbus == NULL) { + pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", + domain, bus); +@@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + goto done; + } + pdev = pci_get_slot(pbus, devfn); +- pr_devel("vgaarb: pdev %p\n", pdev); ++ pr_debug("vgaarb: pdev %p\n", pdev); + if (!pdev) { + pr_err("vgaarb: invalid PCI address %x:%x\n", + bus, devfn); +@@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + } + + vgadev = vgadev_find(pdev); +- pr_devel("vgaarb: vgadev %p\n", vgadev); ++ pr_debug("vgaarb: vgadev %p\n", vgadev); + if (vgadev == NULL) { + pr_err("vgaarb: this pci device is not a vga device\n"); + pci_dev_put(pdev); +@@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, + } else if (strncmp(curr_pos, "decodes ", 8) == 0) { + curr_pos += 8; + remaining -= 8; +- pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); ++ pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv); + + if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { + ret_val = -EPROTO; +@@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) + { 
+ struct vga_arb_private *priv = file->private_data; + +- pr_devel("%s\n", __func__); ++ pr_debug("%s\n", __func__); + + if (priv == NULL) + return -ENODEV; +@@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file) + struct vga_arb_private *priv; + unsigned long flags; + +- pr_devel("%s\n", __func__); ++ pr_debug("%s\n", __func__); + + priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); + if (priv == NULL) +@@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) + unsigned long flags; + int i; + +- pr_devel("%s\n", __func__); ++ pr_debug("%s\n", __func__); + + if (priv == NULL) + return -ENODEV; +@@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) + uc = &priv->cards[i]; + if (uc->pdev == NULL) + continue; +- pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n", ++ pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n", + uc->io_cnt, uc->mem_cnt); + while (uc->io_cnt--) + vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); +@@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action, + struct pci_dev *pdev = to_pci_dev(dev); + bool notify = false; + +- pr_devel("%s\n", __func__); ++ pr_debug("%s\n", __func__); + + /* For now we're only intereted in devices added and removed. 
I didn't + * test this thing here, so someone needs to double check for the +diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig +index 7696a66..82cb8ff 100644 +--- a/drivers/staging/Kconfig ++++ b/drivers/staging/Kconfig +@@ -91,8 +91,6 @@ source "drivers/staging/line6/Kconfig" + + source "drivers/gpu/drm/vmwgfx/Kconfig" + +-source "drivers/gpu/drm/nouveau/Kconfig" +- + source "drivers/staging/octeon/Kconfig" + + source "drivers/staging/serqt_usb2/Kconfig" +diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c +index ecf4055..4a56f46 100644 +--- a/drivers/video/efifb.c ++++ b/drivers/video/efifb.c +@@ -168,7 +168,7 @@ static void efifb_destroy(struct fb_info *info) + { + if (info->screen_base) + iounmap(info->screen_base); +- release_mem_region(info->aperture_base, info->aperture_size); ++ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); + framebuffer_release(info); + } + +@@ -292,8 +292,13 @@ static int __devinit efifb_probe(struct platform_device *dev) + info->pseudo_palette = info->par; + info->par = NULL; + +- info->aperture_base = efifb_fix.smem_start; +- info->aperture_size = size_remap; ++ info->apertures = alloc_apertures(1); ++ if (!info->apertures) { ++ err = -ENOMEM; ++ goto err_release_fb; ++ } ++ info->apertures->ranges[0].base = efifb_fix.smem_start; ++ info->apertures->ranges[0].size = size_remap; + + info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); + if (!info->screen_base) { +diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c +index a15b44e..e08b7b5 100644 +--- a/drivers/video/fbmem.c ++++ b/drivers/video/fbmem.c +@@ -1468,16 +1468,67 @@ static int fb_check_foreignness(struct fb_info *fi) + return 0; + } + +-static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw) ++static bool apertures_overlap(struct aperture *gen, struct aperture *hw) + { + /* is the generic aperture base the same as the HW one */ +- if (gen->aperture_base == 
hw->aperture_base) ++ if (gen->base == hw->base) + return true; + /* is the generic aperture base inside the hw base->hw base+size */ +- if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size) ++ if (gen->base > hw->base && gen->base <= hw->base + hw->size) + return true; + return false; + } ++ ++static bool fb_do_apertures_overlap(struct apertures_struct *gena, ++ struct apertures_struct *hwa) ++{ ++ int i, j; ++ if (!hwa || !gena) ++ return false; ++ ++ for (i = 0; i < hwa->count; ++i) { ++ struct aperture *h = &hwa->ranges[i]; ++ for (j = 0; j < gena->count; ++j) { ++ struct aperture *g = &gena->ranges[j]; ++ printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n", ++ g->base, g->size, h->base, h->size); ++ if (apertures_overlap(g, h)) ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++#define VGA_FB_PHYS 0xA0000 ++void remove_conflicting_framebuffers(struct apertures_struct *a, ++ const char *name, bool primary) ++{ ++ int i; ++ ++ /* check all firmware fbs and kick off if the base addr overlaps */ ++ for (i = 0 ; i < FB_MAX; i++) { ++ struct apertures_struct *gen_aper; ++ if (!registered_fb[i]) ++ continue; ++ ++ if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE)) ++ continue; ++ ++ gen_aper = registered_fb[i]->apertures; ++ if (fb_do_apertures_overlap(gen_aper, a) || ++ (primary && gen_aper && gen_aper->count && ++ gen_aper->ranges[0].base == VGA_FB_PHYS)) { ++ ++ printk(KERN_ERR "fb: conflicting fb hw usage " ++ "%s vs %s - removing generic driver\n", ++ name, registered_fb[i]->fix.id); ++ unregister_framebuffer(registered_fb[i]); ++ } ++ } ++} ++EXPORT_SYMBOL(remove_conflicting_framebuffers); ++ + /** + * register_framebuffer - registers a frame buffer device + * @fb_info: frame buffer info structure +@@ -1501,21 +1552,8 @@ register_framebuffer(struct fb_info *fb_info) + if (fb_check_foreignness(fb_info)) + return -ENOSYS; + +- /* check all firmware fbs and kick off if the base addr 
overlaps */ +- for (i = 0 ; i < FB_MAX; i++) { +- if (!registered_fb[i]) +- continue; +- +- if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) { +- if (fb_do_apertures_overlap(registered_fb[i], fb_info)) { +- printk(KERN_ERR "fb: conflicting fb hw usage " +- "%s vs %s - removing generic driver\n", +- fb_info->fix.id, +- registered_fb[i]->fix.id); +- unregister_framebuffer(registered_fb[i]); +- } +- } +- } ++ remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, ++ fb_is_primary_device(fb_info)); + + num_registered_fb++; + for (i = 0 ; i < FB_MAX; i++) +diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c +index 81aa312..0a08f13 100644 +--- a/drivers/video/fbsysfs.c ++++ b/drivers/video/fbsysfs.c +@@ -80,6 +80,7 @@ EXPORT_SYMBOL(framebuffer_alloc); + */ + void framebuffer_release(struct fb_info *info) + { ++ kfree(info->apertures); + kfree(info); + } + EXPORT_SYMBOL(framebuffer_release); +diff --git a/drivers/video/offb.c b/drivers/video/offb.c +index 61f8b8f..46dda7d 100644 +--- a/drivers/video/offb.c ++++ b/drivers/video/offb.c +@@ -285,7 +285,7 @@ static void offb_destroy(struct fb_info *info) + { + if (info->screen_base) + iounmap(info->screen_base); +- release_mem_region(info->aperture_base, info->aperture_size); ++ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); + framebuffer_release(info); + } + +@@ -491,8 +491,11 @@ static void __init offb_init_fb(const char *name, const char *full_name, + var->vmode = FB_VMODE_NONINTERLACED; + + /* set offb aperture size for generic probing */ +- info->aperture_base = address; +- info->aperture_size = fix->smem_len; ++ info->apertures = alloc_apertures(1); ++ if (!info->apertures) ++ goto out_aper; ++ info->apertures->ranges[0].base = address; ++ info->apertures->ranges[0].size = fix->smem_len; + + info->fbops = &offb_ops; + info->screen_base = ioremap(address, fix->smem_len); +@@ -501,17 +504,20 @@ static void __init offb_init_fb(const char *name, const char 
*full_name, + + fb_alloc_cmap(&info->cmap, 256, 0); + +- if (register_framebuffer(info) < 0) { +- iounmap(par->cmap_adr); +- par->cmap_adr = NULL; +- iounmap(info->screen_base); +- framebuffer_release(info); +- release_mem_region(res_start, res_size); +- return; +- } ++ if (register_framebuffer(info) < 0) ++ goto out_err; + + printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n", + info->node, full_name); ++ return; ++ ++out_err: ++ iounmap(info->screen_base); ++out_aper: ++ iounmap(par->cmap_adr); ++ par->cmap_adr = NULL; ++ framebuffer_release(info); ++ release_mem_region(res_start, res_size); + } + + +diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c +index 0cadf7a..090aa1a 100644 +--- a/drivers/video/vesafb.c ++++ b/drivers/video/vesafb.c +@@ -177,7 +177,7 @@ static void vesafb_destroy(struct fb_info *info) + { + if (info->screen_base) + iounmap(info->screen_base); +- release_mem_region(info->aperture_base, info->aperture_size); ++ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); + framebuffer_release(info); + } + +@@ -295,8 +295,13 @@ static int __init vesafb_probe(struct platform_device *dev) + info->par = NULL; + + /* set vesafb aperture size for generic probing */ +- info->aperture_base = screen_info.lfb_base; +- info->aperture_size = size_total; ++ info->apertures = alloc_apertures(1); ++ if (!info->apertures) { ++ err = -ENOMEM; ++ goto err; ++ } ++ info->apertures->ranges[0].base = screen_info.lfb_base; ++ info->apertures->ranges[0].size = size_total; + + info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len); + if (!info->screen_base) { +diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c +index bf638a4..149c47a 100644 +--- a/drivers/video/vga16fb.c ++++ b/drivers/video/vga16fb.c +@@ -1263,10 +1263,19 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image + vga_imageblit_color(info, image); + } + ++static void vga16fb_destroy(struct 
fb_info *info) ++{ ++ iounmap(info->screen_base); ++ fb_dealloc_cmap(&info->cmap); ++ /* XXX unshare VGA regions */ ++ framebuffer_release(info); ++} ++ + static struct fb_ops vga16fb_ops = { + .owner = THIS_MODULE, + .fb_open = vga16fb_open, + .fb_release = vga16fb_release, ++ .fb_destroy = vga16fb_destroy, + .fb_check_var = vga16fb_check_var, + .fb_set_par = vga16fb_set_par, + .fb_setcolreg = vga16fb_setcolreg, +@@ -1306,6 +1315,11 @@ static int __devinit vga16fb_probe(struct platform_device *dev) + ret = -ENOMEM; + goto err_fb_alloc; + } ++ info->apertures = alloc_apertures(1); ++ if (!info->apertures) { ++ ret = -ENOMEM; ++ goto err_ioremap; ++ } + + /* XXX share VGA_FB_PHYS and I/O region with vgacon and others */ + info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0); +@@ -1335,7 +1349,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev) + info->fix = vga16fb_fix; + /* supports rectangles with widths of multiples of 8 */ + info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31; +- info->flags = FBINFO_FLAG_DEFAULT | ++ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE | + FBINFO_HWACCEL_YPAN; + + i = (info->var.bits_per_pixel == 8) ? 
256 : 16; +@@ -1354,6 +1368,9 @@ static int __devinit vga16fb_probe(struct platform_device *dev) + + vga16fb_update_fix(info); + ++ info->apertures->ranges[0].base = VGA_FB_PHYS; ++ info->apertures->ranges[0].size = VGA_FB_PHYS_LEN; ++ + if (register_framebuffer(info) < 0) { + printk(KERN_ERR "vga16fb: unable to register framebuffer\n"); + ret = -EINVAL; +@@ -1380,13 +1397,8 @@ static int vga16fb_remove(struct platform_device *dev) + { + struct fb_info *info = platform_get_drvdata(dev); + +- if (info) { ++ if (info) + unregister_framebuffer(info); +- iounmap(info->screen_base); +- fb_dealloc_cmap(&info->cmap); +- /* XXX unshare VGA regions */ +- framebuffer_release(info); +- } + + return 0; + } +diff --git a/include/drm/drmP.h b/include/drm/drmP.h +index 2f3b3a0..c1b9871 100644 +--- a/include/drm/drmP.h ++++ b/include/drm/drmP.h +@@ -1428,10 +1428,13 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector); + /* Graphics Execution Manager library functions (drm_gem.c) */ + int drm_gem_init(struct drm_device *dev); + void drm_gem_destroy(struct drm_device *dev); ++void drm_gem_object_release(struct drm_gem_object *obj); + void drm_gem_object_free(struct kref *kref); + void drm_gem_object_free_unlocked(struct kref *kref); + struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, + size_t size); ++int drm_gem_object_init(struct drm_device *dev, ++ struct drm_gem_object *obj, size_t size); + void drm_gem_object_handle_free(struct kref *kref); + void drm_gem_vm_open(struct vm_area_struct *vma); + void drm_gem_vm_close(struct vm_area_struct *vma); +diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h +index 1347524..93a1a31 100644 +--- a/include/drm/drm_crtc.h ++++ b/include/drm/drm_crtc.h +@@ -31,6 +31,7 @@ + #include + + #include ++#include + + struct drm_device; + struct drm_mode_set; +@@ -271,8 +272,6 @@ struct drm_framebuffer { + unsigned int depth; + int bits_per_pixel; + int flags; +- struct fb_info *fbdev; +- u32 
pseudo_palette[17]; + struct list_head filp_head; + /* if you are using the helper */ + void *helper_private; +@@ -369,9 +368,6 @@ struct drm_crtc_funcs { + * @enabled: is this CRTC enabled? + * @x: x position on screen + * @y: y position on screen +- * @desired_mode: new desired mode +- * @desired_x: desired x for desired_mode +- * @desired_y: desired y for desired_mode + * @funcs: CRTC control functions + * + * Each CRTC may have one or more connectors associated with it. This structure +@@ -391,8 +387,6 @@ struct drm_crtc { + struct drm_display_mode mode; + + int x, y; +- struct drm_display_mode *desired_mode; +- int desired_x, desired_y; + const struct drm_crtc_funcs *funcs; + + /* CRTC gamma size for reporting to userspace */ +@@ -467,6 +461,15 @@ enum drm_connector_force { + DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ + }; + ++/* should we poll this connector for connects and disconnects */ ++/* hot plug detectable */ ++#define DRM_CONNECTOR_POLL_HPD (1 << 0) ++/* poll for connections */ ++#define DRM_CONNECTOR_POLL_CONNECT (1 << 1) ++/* can cleanly poll for disconnections without flickering the screen */ ++/* DACs should rarely do this without a lot of testing */ ++#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) ++ + /** + * drm_connector - central DRM connector control structure + * @crtc: CRTC this connector is currently connected to, NULL if none +@@ -511,6 +514,8 @@ struct drm_connector { + u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; + uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; + ++ uint8_t polled; /* DRM_CONNECTOR_POLL_* */ ++ + /* requested DPMS state */ + int dpms; + +@@ -521,7 +526,6 @@ struct drm_connector { + uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; + uint32_t force_encoder_id; + struct drm_encoder *encoder; /* currently active encoder */ +- void *fb_helper_private; + }; + + /** +@@ -548,16 +552,10 @@ struct drm_mode_set { + + /** + * struct drm_mode_config_funcs - configure CRTCs for a given screen layout +- * 
@resize: adjust CRTCs as necessary for the proposed layout +- * +- * Currently only a resize hook is available. DRM will call back into the +- * driver with a new screen width and height. If the driver can't support +- * the proposed size, it can return false. Otherwise it should adjust +- * the CRTC<->connector mappings as needed and update its view of the screen. + */ + struct drm_mode_config_funcs { + struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd); +- int (*fb_changed)(struct drm_device *dev); ++ void (*output_poll_changed)(struct drm_device *dev); + }; + + struct drm_mode_group { +@@ -590,14 +588,15 @@ struct drm_mode_config { + + struct list_head property_list; + +- /* in-kernel framebuffers - hung of filp_head in drm_framebuffer */ +- struct list_head fb_kernel_list; +- + int min_width, min_height; + int max_width, max_height; + struct drm_mode_config_funcs *funcs; + resource_size_t fb_base; + ++ /* output poll support */ ++ bool poll_enabled; ++ struct delayed_slow_work output_poll_slow_work; ++ + /* pointers to standard properties */ + struct list_head property_blob_list; + struct drm_property *edid_property; +@@ -666,8 +665,6 @@ extern void drm_fb_release(struct drm_file *file_priv); + extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); + extern struct edid *drm_get_edid(struct drm_connector *connector, + struct i2c_adapter *adapter); +-extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, +- unsigned char *buf, int len); + extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); + extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); + extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); +@@ -799,8 +796,14 @@ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, + extern struct drm_display_mode 
*drm_gtf_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool interlaced, int margins); ++extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, ++ int hdisplay, int vdisplay, int vrefresh, ++ bool interlaced, int margins, int GTF_M, ++ int GTF_2C, int GTF_K, int GTF_2J); + extern int drm_add_modes_noedid(struct drm_connector *connector, + int hdisplay, int vdisplay); + + extern bool drm_edid_is_valid(struct edid *edid); ++struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, ++ int hsize, int vsize, int fresh); + #endif /* __DRM_CRTC_H__ */ +diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h +index b29e201..1121f77 100644 +--- a/include/drm/drm_crtc_helper.h ++++ b/include/drm/drm_crtc_helper.h +@@ -39,7 +39,6 @@ + + #include + +-#include "drm_fb_helper.h" + struct drm_crtc_helper_funcs { + /* + * Control power levels on the CRTC. If the mode passed in is +@@ -96,8 +95,6 @@ struct drm_connector_helper_funcs { + + extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); + extern void drm_helper_disable_unused_functions(struct drm_device *dev); +-extern int drm_helper_hotplug_stage_two(struct drm_device *dev); +-extern bool drm_helper_initial_config(struct drm_device *dev); + extern int drm_crtc_helper_set_config(struct drm_mode_set *set); + extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, +@@ -123,12 +120,17 @@ static inline void drm_encoder_helper_add(struct drm_encoder *encoder, + encoder->helper_private = (void *)funcs; + } + +-static inline int drm_connector_helper_add(struct drm_connector *connector, ++static inline void drm_connector_helper_add(struct drm_connector *connector, + const struct drm_connector_helper_funcs *funcs) + { + connector->helper_private = (void *)funcs; +- return drm_fb_helper_add_connector(connector); + } + + extern int 
drm_helper_resume_force_mode(struct drm_device *dev); ++extern void drm_kms_helper_poll_init(struct drm_device *dev); ++extern void drm_kms_helper_poll_fini(struct drm_device *dev); ++extern void drm_helper_hpd_irq_event(struct drm_device *dev); ++ ++extern void drm_kms_helper_poll_disable(struct drm_device *dev); ++extern void drm_kms_helper_poll_enable(struct drm_device *dev); + #endif +diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h +index b420989..39e2cc5 100644 +--- a/include/drm/drm_edid.h ++++ b/include/drm/drm_edid.h +@@ -120,7 +120,7 @@ struct detailed_non_pixel { + struct detailed_data_string str; + struct detailed_data_monitor_range range; + struct detailed_data_wpindex color; +- struct std_timing timings[5]; ++ struct std_timing timings[6]; + struct cvt_timing cvt[4]; + } data; + } __attribute__((packed)); +@@ -201,7 +201,4 @@ struct edid { + + #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) + +-/* define the number of Extension EDID block */ +-#define DRM_MAX_EDID_EXT_NUM 4 +- + #endif /* __DRM_EDID_H__ */ +diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h +index 58c892a..f0a6afc 100644 +--- a/include/drm/drm_fb_helper.h ++++ b/include/drm/drm_fb_helper.h +@@ -30,17 +30,12 @@ + #ifndef DRM_FB_HELPER_H + #define DRM_FB_HELPER_H + ++struct drm_fb_helper; ++ + struct drm_fb_helper_crtc { + uint32_t crtc_id; + struct drm_mode_set mode_set; +-}; +- +- +-struct drm_fb_helper_funcs { +- void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, +- u16 blue, int regno); +- void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, +- u16 *blue, int regno); ++ struct drm_display_mode *desired_mode; + }; + + /* mode specified on the command line */ +@@ -57,8 +52,28 @@ struct drm_fb_helper_cmdline_mode { + bool margins; + }; + ++struct drm_fb_helper_surface_size { ++ u32 fb_width; ++ u32 fb_height; ++ u32 surface_width; ++ u32 surface_height; ++ u32 surface_bpp; ++ u32 surface_depth; ++}; ++ 
++struct drm_fb_helper_funcs { ++ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, ++ u16 blue, int regno); ++ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, ++ u16 *blue, int regno); ++ ++ int (*fb_probe)(struct drm_fb_helper *helper, ++ struct drm_fb_helper_surface_size *sizes); ++}; ++ + struct drm_fb_helper_connector { + struct drm_fb_helper_cmdline_mode cmdline_mode; ++ struct drm_connector *connector; + }; + + struct drm_fb_helper { +@@ -67,24 +82,26 @@ struct drm_fb_helper { + struct drm_display_mode *mode; + int crtc_count; + struct drm_fb_helper_crtc *crtc_info; ++ int connector_count; ++ struct drm_fb_helper_connector **connector_info; + struct drm_fb_helper_funcs *funcs; + int conn_limit; ++ struct fb_info *fbdev; ++ u32 pseudo_palette[17]; + struct list_head kernel_fb_list; ++ ++ /* we got a hotplug but fbdev wasn't running the console ++ delay until next set_par */ ++ bool delayed_hotplug; + }; + +-int drm_fb_helper_single_fb_probe(struct drm_device *dev, +- int preferred_bpp, +- int (*fb_create)(struct drm_device *dev, +- uint32_t fb_width, +- uint32_t fb_height, +- uint32_t surface_width, +- uint32_t surface_height, +- uint32_t surface_depth, +- uint32_t surface_bpp, +- struct drm_framebuffer **fb_ptr)); +-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, +- int max_conn); +-void drm_fb_helper_free(struct drm_fb_helper *helper); ++int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper, ++ int preferred_bpp); ++ ++int drm_fb_helper_init(struct drm_device *dev, ++ struct drm_fb_helper *helper, int crtc_count, ++ int max_conn); ++void drm_fb_helper_fini(struct drm_fb_helper *helper); + int drm_fb_helper_blank(int blank, struct fb_info *info); + int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info); +@@ -99,13 +116,15 @@ int drm_fb_helper_setcolreg(unsigned regno, + struct fb_info *info); + + void drm_fb_helper_restore(void); +-void 
drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, ++void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, + uint32_t fb_width, uint32_t fb_height); + void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth); + +-int drm_fb_helper_add_connector(struct drm_connector *connector); +-int drm_fb_helper_parse_command_line(struct drm_device *dev); + int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); + ++bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); ++bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); ++int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); ++ + #endif +diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h +new file mode 100644 +index 0000000..4a08a66 +--- /dev/null ++++ b/include/drm/drm_fixed.h +@@ -0,0 +1,67 @@ ++/* ++ * Copyright 2009 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Dave Airlie ++ */ ++#ifndef DRM_FIXED_H ++#define DRM_FIXED_H ++ ++typedef union dfixed { ++ u32 full; ++} fixed20_12; ++ ++ ++#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ ++#define dfixed_const_half(A) (u32)(((A) << 12) + 2048) ++#define dfixed_const_666(A) (u32)(((A) << 12) + 2731) ++#define dfixed_const_8(A) (u32)(((A) << 12) + 3277) ++#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) ++#define dfixed_init(A) { .full = dfixed_const((A)) } ++#define dfixed_init_half(A) { .full = dfixed_const_half((A)) } ++#define dfixed_trunc(A) ((A).full >> 12) ++ ++static inline u32 dfixed_floor(fixed20_12 A) ++{ ++ u32 non_frac = dfixed_trunc(A); ++ ++ return dfixed_const(non_frac); ++} ++ ++static inline u32 dfixed_ceil(fixed20_12 A) ++{ ++ u32 non_frac = dfixed_trunc(A); ++ ++ if (A.full > dfixed_const(non_frac)) ++ return dfixed_const(non_frac + 1); ++ else ++ return dfixed_const(non_frac); ++} ++ ++static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) ++{ ++ u64 tmp = ((u64)A.full << 13); ++ ++ do_div(tmp, B.full); ++ tmp += 1; ++ tmp /= 2; ++ return lower_32_bits(tmp); ++} ++#endif +diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h +index b64a8d7..7f0028e 100644 +--- a/include/drm/i915_drm.h ++++ b/include/drm/i915_drm.h +@@ -275,6 +275,7 @@ typedef struct drm_i915_irq_wait { + #define I915_PARAM_HAS_OVERLAY 7 + #define I915_PARAM_HAS_PAGEFLIPPING 8 + #define I915_PARAM_HAS_EXECBUF2 9 ++#define I915_PARAM_HAS_BSD 10 + + typedef struct drm_i915_getparam { + int param; +@@ -616,7 +617,9 @@ struct drm_i915_gem_execbuffer2 { + __u32 num_cliprects; + /** This is a struct drm_clip_rect *cliprects */ + __u64 
cliprects_ptr; +- __u64 flags; /* currently unused */ ++#define I915_EXEC_RENDER (1<<0) ++#define I915_EXEC_BSD (1<<1) ++ __u64 flags; + __u64 rsvd1; + __u64 rsvd2; + }; +diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h +index a6a9f4a..fe917de 100644 +--- a/include/drm/nouveau_drm.h ++++ b/include/drm/nouveau_drm.h +@@ -79,6 +79,7 @@ struct drm_nouveau_gpuobj_free { + #define NOUVEAU_GETPARAM_CHIPSET_ID 11 + #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 + #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 ++#define NOUVEAU_GETPARAM_PTIMER_TIME 14 + struct drm_nouveau_getparam { + uint64_t param; + uint64_t value; +diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h +index 81e614b..5347063 100644 +--- a/include/drm/radeon_drm.h ++++ b/include/drm/radeon_drm.h +@@ -902,6 +902,8 @@ struct drm_radeon_cs { + #define RADEON_INFO_NUM_GB_PIPES 0x01 + #define RADEON_INFO_NUM_Z_PIPES 0x02 + #define RADEON_INFO_ACCEL_WORKING 0x03 ++#define RADEON_INFO_CRTC_FROM_ID 0x04 ++#define RADEON_INFO_ACCEL_WORKING2 0x05 + + struct drm_radeon_info { + uint32_t request; +diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h +index 81eb9f4..267a86c 100644 +--- a/include/drm/ttm/ttm_bo_api.h ++++ b/include/drm/ttm/ttm_bo_api.h +@@ -66,6 +66,26 @@ struct ttm_placement { + const uint32_t *busy_placement; + }; + ++/** ++ * struct ttm_bus_placement ++ * ++ * @addr: mapped virtual address ++ * @base: bus base address ++ * @is_iomem: is this io memory ? ++ * @size: size in byte ++ * @offset: offset from the base address ++ * ++ * Structure indicating the bus placement of an object. ++ */ ++struct ttm_bus_placement { ++ void *addr; ++ unsigned long base; ++ unsigned long size; ++ unsigned long offset; ++ bool is_iomem; ++ bool io_reserved; ++}; ++ + + /** + * struct ttm_mem_reg +@@ -75,6 +95,7 @@ struct ttm_placement { + * @num_pages: Actual size of memory region in pages. + * @page_alignment: Page alignment. + * @placement: Placement flags. 
++ * @bus: Placement on io bus accessible to the CPU + * + * Structure indicating the placement and space resources used by a + * buffer object. +@@ -87,6 +108,7 @@ struct ttm_mem_reg { + uint32_t page_alignment; + uint32_t mem_type; + uint32_t placement; ++ struct ttm_bus_placement bus; + }; + + /** +@@ -274,6 +296,7 @@ struct ttm_bo_kmap_obj { + ttm_bo_map_kmap = 3, + ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, + } bo_kmap_type; ++ struct ttm_buffer_object *bo; + }; + + /** +@@ -313,7 +336,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, + * @bo: The buffer object. + * @placement: Proposed placement for the buffer object. + * @interruptible: Sleep interruptible if sleeping. +- * @no_wait: Return immediately if the buffer is busy. ++ * @no_wait_reserve: Return immediately if other buffers are busy. ++ * @no_wait_gpu: Return immediately if the GPU is busy. + * + * Changes placement and caching policy of the buffer object + * according proposed placement. +@@ -325,7 +349,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, + */ + extern int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, +- bool interruptible, bool no_wait); ++ bool interruptible, bool no_wait_reserve, ++ bool no_wait_gpu); + + /** + * ttm_bo_unref +@@ -337,6 +362,23 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, + extern void ttm_bo_unref(struct ttm_buffer_object **bo); + + /** ++ * ttm_bo_lock_delayed_workqueue ++ * ++ * Prevent the delayed workqueue from running. ++ * Returns ++ * True if the workqueue was queued at the time ++ */ ++extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); ++ ++/** ++ * ttm_bo_unlock_delayed_workqueue ++ * ++ * Allows the delayed workqueue to run. 
++ */ ++extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, ++ int resched); ++ ++/** + * ttm_bo_synccpu_write_grab + * + * @bo: The buffer object: +diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h +index 6b9db91..0ea602d 100644 +--- a/include/drm/ttm/ttm_bo_driver.h ++++ b/include/drm/ttm/ttm_bo_driver.h +@@ -176,8 +176,6 @@ struct ttm_tt { + + #define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ + #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ +-#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap +- before kernel access. */ + #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ + + /** +@@ -189,13 +187,6 @@ struct ttm_tt { + * managed by this memory type. + * @gpu_offset: If used, the GPU offset of the first managed page of + * fixed memory or the first managed location in an aperture. +- * @io_offset: The io_offset of the first managed page of IO memory or +- * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA +- * memory, this should be set to NULL. +- * @io_size: The size of a managed IO region (fixed memory or aperture). +- * @io_addr: Virtual kernel address if the io region is pre-mapped. For +- * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and +- * @io_addr should be set to NULL. + * @size: Size of the managed region. 
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, + * as defined in ttm_placement_common.h +@@ -221,9 +212,6 @@ struct ttm_mem_type_manager { + bool use_type; + uint32_t flags; + unsigned long gpu_offset; +- unsigned long io_offset; +- unsigned long io_size; +- void *io_addr; + uint64_t size; + uint32_t available_caching; + uint32_t default_caching; +@@ -311,7 +299,8 @@ struct ttm_bo_driver { + */ + int (*move) (struct ttm_buffer_object *bo, + bool evict, bool interruptible, +- bool no_wait, struct ttm_mem_reg *new_mem); ++ bool no_wait_reserve, bool no_wait_gpu, ++ struct ttm_mem_reg *new_mem); + + /** + * struct ttm_bo_driver_member verify_access +@@ -351,12 +340,21 @@ struct ttm_bo_driver { + struct ttm_mem_reg *new_mem); + /* notify the driver we are taking a fault on this BO + * and have reserved it */ +- void (*fault_reserve_notify)(struct ttm_buffer_object *bo); ++ int (*fault_reserve_notify)(struct ttm_buffer_object *bo); + + /** + * notify the driver that we're about to swap out this bo + */ + void (*swap_notify) (struct ttm_buffer_object *bo); ++ ++ /** ++ * Driver callback on when mapping io memory (for bo_move_memcpy ++ * for instance). TTM will take care to call io_mem_free whenever ++ * the mapping is not use anymore. io_mem_reserve & io_mem_free ++ * are balanced. ++ */ ++ int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); ++ void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); + }; + + /** +@@ -633,7 +631,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, + * @proposed_placement: Proposed new placement for the buffer object. + * @mem: A struct ttm_mem_reg. + * @interruptible: Sleep interruptible when sliping. +- * @no_wait: Don't sleep waiting for space to become available. ++ * @no_wait_reserve: Return immediately if other buffers are busy. ++ * @no_wait_gpu: Return immediately if the GPU is busy. 
+ * + * Allocate memory space for the buffer object pointed to by @bo, using + * the placement flags in @mem, potentially evicting other idle buffer objects. +@@ -647,7 +646,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, + extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, +- bool interruptible, bool no_wait); ++ bool interruptible, ++ bool no_wait_reserve, bool no_wait_gpu); + /** + * ttm_bo_wait_for_cpu + * +@@ -682,6 +682,11 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev, + unsigned long *bus_offset, + unsigned long *bus_size); + ++extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev, ++ struct ttm_mem_reg *mem); ++extern void ttm_mem_io_free(struct ttm_bo_device *bdev, ++ struct ttm_mem_reg *mem); ++ + extern void ttm_bo_global_release(struct ttm_global_reference *ref); + extern int ttm_bo_global_init(struct ttm_global_reference *ref); + +@@ -798,7 +803,8 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, + * + * @bo: A pointer to a struct ttm_buffer_object. + * @evict: 1: This is an eviction. Don't try to pipeline. +- * @no_wait: Never sleep, but rather return with -EBUSY. ++ * @no_wait_reserve: Return immediately if other buffers are busy. ++ * @no_wait_gpu: Return immediately if the GPU is busy. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Optimized move function for a buffer object with both old and +@@ -812,15 +818,16 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, + */ + + extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, +- bool evict, bool no_wait, +- struct ttm_mem_reg *new_mem); ++ bool evict, bool no_wait_reserve, ++ bool no_wait_gpu, struct ttm_mem_reg *new_mem); + + /** + * ttm_bo_move_memcpy + * + * @bo: A pointer to a struct ttm_buffer_object. + * @evict: 1: This is an eviction. Don't try to pipeline. +- * @no_wait: Never sleep, but rather return with -EBUSY. 
++ * @no_wait_reserve: Return immediately if other buffers are busy. ++ * @no_wait_gpu: Return immediately if the GPU is busy. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Fallback move function for a mappable buffer object in mappable memory. +@@ -834,8 +841,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + */ + + extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, +- bool evict, +- bool no_wait, struct ttm_mem_reg *new_mem); ++ bool evict, bool no_wait_reserve, ++ bool no_wait_gpu, struct ttm_mem_reg *new_mem); + + /** + * ttm_bo_free_old_node +@@ -854,7 +861,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); + * @sync_obj_arg: An argument to pass to the sync object idle / wait + * functions. + * @evict: This is an evict move. Don't return until the buffer is idle. +- * @no_wait: Never sleep, but rather return with -EBUSY. ++ * @no_wait_reserve: Return immediately if other buffers are busy. ++ * @no_wait_gpu: Return immediately if the GPU is busy. + * @new_mem: struct ttm_mem_reg indicating where to move. + * + * Accelerated move function to be called when an accelerated move +@@ -868,7 +876,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); + extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + void *sync_obj, + void *sync_obj_arg, +- bool evict, bool no_wait, ++ bool evict, bool no_wait_reserve, ++ bool no_wait_gpu, + struct ttm_mem_reg *new_mem); + /** + * ttm_io_prot +diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h +new file mode 100644 +index 0000000..8bb4de5 +--- /dev/null ++++ b/include/drm/ttm/ttm_page_alloc.h +@@ -0,0 +1,74 @@ ++/* ++ * Copyright (c) Red Hat Inc. 
++ ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Dave Airlie ++ * Jerome Glisse ++ */ ++#ifndef TTM_PAGE_ALLOC ++#define TTM_PAGE_ALLOC ++ ++#include "ttm_bo_driver.h" ++#include "ttm_memory.h" ++ ++/** ++ * Get count number of pages from pool to pages list. ++ * ++ * @pages: heado of empty linked list where pages are filled. ++ * @flags: ttm flags for page allocation. ++ * @cstate: ttm caching state for the page. ++ * @count: number of pages to allocate. ++ */ ++int ttm_get_pages(struct list_head *pages, ++ int flags, ++ enum ttm_caching_state cstate, ++ unsigned count); ++/** ++ * Put linked list of pages to pool. ++ * ++ * @pages: list of pages to free. ++ * @page_count: number of pages in the list. Zero can be passed for unknown ++ * count. ++ * @flags: ttm flags for page allocation. ++ * @cstate: ttm caching state. 
++ */ ++void ttm_put_pages(struct list_head *pages, ++ unsigned page_count, ++ int flags, ++ enum ttm_caching_state cstate); ++/** ++ * Initialize pool allocator. ++ * ++ * Pool allocator is internaly reference counted so it can be initialized ++ * multiple times but ttm_page_alloc_fini has to be called same number of ++ * times. ++ */ ++int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); ++/** ++ * Free pool allocator. ++ */ ++void ttm_page_alloc_fini(void); ++ ++/** ++ * Output the state of pools to debugfs file ++ */ ++extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data); ++#endif +diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h +index c7645f4..4d08423 100644 +--- a/include/drm/vmwgfx_drm.h ++++ b/include/drm/vmwgfx_drm.h +@@ -50,6 +50,8 @@ + #define DRM_VMW_EXECBUF 12 + #define DRM_VMW_FIFO_DEBUG 13 + #define DRM_VMW_FENCE_WAIT 14 ++/* guarded by minor version >= 2 */ ++#define DRM_VMW_UPDATE_LAYOUT 15 + + + /*************************************************************************/ +@@ -585,4 +587,28 @@ struct drm_vmw_stream_arg { + * sure that the stream has been stopped. + */ + ++/*************************************************************************/ ++/** ++ * DRM_VMW_UPDATE_LAYOUT - Update layout ++ * ++ * Updates the prefered modes and connection status for connectors. The ++ * command conisits of one drm_vmw_update_layout_arg pointing out a array ++ * of num_outputs drm_vmw_rect's. ++ */ ++ ++/** ++ * struct drm_vmw_update_layout_arg ++ * ++ * @num_outputs: number of active ++ * @rects: pointer to array of drm_vmw_rect ++ * ++ * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. 
++ */ ++ ++struct drm_vmw_update_layout_arg { ++ uint32_t num_outputs; ++ uint32_t pad64; ++ uint64_t rects; ++}; ++ + #endif +diff --git a/include/linux/fb.h b/include/linux/fb.h +index c10163b..1296af4 100644 +--- a/include/linux/fb.h ++++ b/include/linux/fb.h +@@ -403,6 +403,7 @@ struct fb_cursor { + #include + #include + #include ++#include + #include + + struct vm_area_struct; +@@ -862,10 +863,22 @@ struct fb_info { + /* we need the PCI or similiar aperture base/size not + smem_start/size as smem_start may just be an object + allocated inside the aperture so may not actually overlap */ +- resource_size_t aperture_base; +- resource_size_t aperture_size; ++ struct apertures_struct { ++ unsigned int count; ++ struct aperture { ++ resource_size_t base; ++ resource_size_t size; ++ } ranges[0]; ++ } *apertures; + }; + ++static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { ++ struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) ++ + max_num * sizeof(struct aperture), GFP_KERNEL); ++ a->count = max_num; ++ return a; ++} ++ + #ifdef MODULE + #define FBINFO_DEFAULT FBINFO_MODULE + #else +@@ -958,6 +971,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, + /* drivers/video/fbmem.c */ + extern int register_framebuffer(struct fb_info *fb_info); + extern int unregister_framebuffer(struct fb_info *fb_info); ++extern void remove_conflicting_framebuffers(struct apertures_struct *a, ++ const char *name, bool primary); + extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); + extern int fb_show_logo(struct fb_info *fb_info, int rotate); + extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); +diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h +index 2dfaa29..c9a9759 100644 +--- a/include/linux/vgaarb.h ++++ b/include/linux/vgaarb.h +@@ -5,6 +5,27 @@ + * (C) Copyright 2005 Benjamin Herrenschmidt + * (C) Copyright 2007 Paulo R. 
Zanoni + * (C) Copyright 2007, 2009 Tiago Vignatti ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS ++ * IN THE SOFTWARE. 
++ * + */ + + #ifndef LINUX_VGA_H diff --git a/drm-nouveau-updates.patch b/drm-nouveau-updates.patch new file mode 100644 index 000000000..a20516884 --- /dev/null +++ b/drm-nouveau-updates.patch @@ -0,0 +1,5903 @@ +From 06b70a657cec75d89c60243d6c49bc5dae0b5612 Mon Sep 17 00:00:00 2001 +From: Ben Skeggs +Date: Mon, 31 May 2010 12:00:43 +1000 +Subject: [PATCH] drm-nouveau-updates + +drm/nouveau: reduce usage of fence spinlock to when absolutely necessary + +Signed-off-by: Ben Skeggs + +drm/nouveau: place notifiers in system memory by default + +Signed-off-by: Ben Skeggs + +drm/nouveau: move LVDS detection back to connector detect() time + +Signed-off-by: Ben Skeggs + +drm/nouveau: use drm_mm in preference to custom code doing the same thing + +Signed-off-by: Ben Skeggs + +drm/nouveau: remove left-over !DRIVER_MODESET paths + +It's far preferable to have the driver do nothing at all for "nomodeset". + +Signed-off-by: Ben Skeggs + +drm/nouveau: missed some braces + +Luckily this had absolutely no effect whatsoever :) + +Reported-by: Marcin Slusarz +Signed-off-by: Ben Skeggs + +drm/nv50: fix memory detection for cards with >=4GiB VRAM + +Signed-off-by: Ben Skeggs + +drm/nouveau: Put the dithering check back in nouveau_connector_create. + +a7b9f9e5adef dropped it by accident. + +Signed-off-by: Francisco Jerez +Tested-by: Thibaut Girka + +drm/nouveau: Don't clear AGPCMD completely on INIT_RESET. + +We just need to clear the SBA and ENABLE bits to reset the AGP +controller: If the AGP bridge was configured to use "fast writes", +clearing the FW bit would break the subsequent MMIO writes and +eventually end with a lockup. + +Note that all the BIOSes I've seen do the same as we did (it works for +them because they don't use MMIO), OTOH the blob leaves FW untouched. + +Signed-off-by: Francisco Jerez + +drm/nouveau: Ignore broken legacy I2C entries. + +The nv05 card in the bug report [1] doesn't have usable I2C port +register offsets (they're all filled with zeros). 
Ignore them and use +the defaults. + +[1] http://bugs.launchpad.net/bugs/569505 + +Signed-off-by: Francisco Jerez + +drm/nouveau: set encoder for lvds + +fixes oops in nouveau_connector_get_modes with nv_encoder is NULL + +Signed-off-by: Albert Damen +Signed-off-by: Francisco Jerez + +drm/nouveau: tidy connector/encoder creation a little + +Create connectors before encoders to avoid having to do another loop across +encoder list whenever we create a new connector. This allows us to pass +the connector to the encoder creation functions, and avoid using a +create_resources() callback since we can now call it directly. + +This can also potentially modify the connector ordering on nv50. On cards +where the DCB connector and encoder tables are in the same order, things +will be unchanged. However, there's some cards where the ordering between +the tables differ, and in one case, leads us to naming the connectors +"wrongly". + +Signed-off-by: Ben Skeggs + +drm/nouveau: downgrade severity of most init table parser errors + +As long as we know the length of the opcode, we're probably better off +trying to parse the remainder of an init table rather than aborting in +the middle of it. + +Signed-off-by: Ben Skeggs + +drm/nv50: fix DP->DVI if output has been programmed for native DP previously + +Signed-off-by: Ben Skeggs + +drm/nv50: DCB quirk for Dell M6300 + +Uncertain if this is a weirdo configuration, or a BIOS bug. If it's not +a BIOS bug, we still don't know how to make it work anyway so ignore a +"conflicting" DCB entry to prevent a display hang. + +Signed-off-by: Ben Skeggs + +drm/nv50: supply encoder disable() hook for SOR outputs + +Allows us to remove a driver hack that used to be necessary to disable +encoders in certain situations before setting up a mode. The DRM has +better knowledge of when this is needed than the driver does. + +This fixes a number of display switching issues. 
+ +Signed-off-by: Ben Skeggs + +drm/nv50: fix regression caused by ed15e77b6ee7c4fa6f50c18b3325e7f96ed3aade + +It became possible for us to have connectors present without any encoders +attached (TV out, we don't support TVDAC yet), which caused the DDX to +segfault. + +Signed-off-by: Ben Skeggs + +drm/nv04: fix regression caused by ed15e77b6ee7c4fa6f50c18b3325e7f96ed3aade + +Signed-off-by: Ben Skeggs + +drm/nv50: when debugging on, log which crtc we connect an encoder to + +Signed-off-by: Ben Skeggs + +drm/nv17-nv40: Avoid using active CRTCs for load detection. + +Signed-off-by: Francisco Jerez + +drm/nv04-nv40: Prevent invalid DAC/TVDAC combinations. + +Signed-off-by: Francisco Jerez + +drm/nouveau: Fix a couple of sparse warnings. + +Signed-off-by: Francisco Jerez + +drm/nouveau: INIT_CONFIGURE_PREINIT/CLK/MEM on newer BIOSes is not an error. + +No need to spam the logs when they're found, they're equivalent to +INIT_DONE. + +Signed-off-by: Francisco Jerez + +drm/nv04-nv40: Drop redundant logging. + +Signed-off-by: Francisco Jerez + +drm/nouveau: Move the fence wait before migration resource clean-up. + +Avoids an oops in the fence wait failure path (bug 26521). + +Signed-off-by: Francisco Jerez +Tested-by: Marcin Slusarz + +drm/nouveau: Workaround broken TV load detection on a "Zotac FX5200". + +The blob seems to have the same problem so it's probably a hardware +issue (bug 28810). + +Signed-off-by: Francisco Jerez + +drm/nv50: send evo "update" command after each disconnect + +It turns out that the display engine signals an interrupt for disconnects +too. In order to make it easier to process the display interrupts +correctly, we want to ensure we only get one operation per interrupt +sequence - this is what this commit achieves. + +Signed-off-by: Ben Skeggs + +drm/nv50: rewrite display irq handler + +The previous handler basically worked correctly for a full-blown mode +change. 
However, it did nothing at all when a partial (encoder only) +reconfiguation was necessary, leading to the display hanging on certain +types of mode switch. + +Signed-off-by: Ben Skeggs + +drm/nouveau: move DP script invocation to nouveau_dp.c + +Signed-off-by: Ben Skeggs + +drm/nv50: set DP display power state during DPMS + +Signed-off-by: Ben Skeggs + +drm/nouveau: add scaler-only modes for eDP too + +Signed-off-by: Ben Skeggs + +drm/nouveau: remove dev_priv->init_state and friends + +Nouveau will no longer load at all if card initialisation fails, so all +these checks are unnecessary. + +Signed-off-by: Ben Skeggs + +drm/nv50: implement DAC disconnect fix missed in earlier commit + +Signed-off-by: Ben Skeggs + +drm/nouveau: add instmem flush() hook + +This removes the previous prepare_access() and finish_access() hooks, and +replaces it with a much simpler flush() hook. + +All the chipset-specific code before nv50 has its use removed completely, +as it's not required there at all. + +Signed-off-by: Ben Skeggs + +drm/nv50: move tlb flushing to a helper function + +Signed-off-by: Ben Skeggs + +drm/nouveau: remove ability to use external firmware + +This was always really a developer option, and if it's really necessary we +can hack this in ourselves. + +Signed-off-by: Ben Skeggs + +drm/nouveau: allocate fixed amount of PRAMIN per channel on all chipsets + +Previously only done on nv50+ + +This commit also switches unknown NV2x/NV3x chipsets to noaccel mode. 
+ +Signed-off-by: Ben Skeggs + +drm/nouveau: remove unused fbdev_info + +Signed-off-by: Ben Skeggs + +drm/nv50: cleanup nv50_fifo.c + +Signed-off-by: Ben Skeggs + +drm/nv20-nv30: move context table object out of dev_priv + +Signed-off-by: Ben Skeggs + +drm/nv50: fix dp_set_tmds to work on the right OR + +Signed-off-by: Ben Skeggs + +drm/nouveau: fix mtrr cleanup path + +Signed-off-by: Ben Skeggs + +drm/nv50: move dp_set_tmds() function to happen in the last display irq + +It seems on some chipsets that doing this from the 0x20 handler causes the +display engine to not ever signal the final 0x40 stage. + +Signed-off-by: Ben Skeggs + +drm/nouveau: initialise display before enabling interrupts + +In some situations it's possible we can receive a spurious hotplug IRQ +before we're ready to handle it, leading to an oops. + +Calling the display init before enabling interrupts should clear any +pending IRQs on the GPU and prevent this from happening. + +Signed-off-by: Ben Skeggs + +drm/nouveau: Fix crashes during fbcon init on single head cards. + +Signed-off-by: Francisco Jerez + +drm/nouveau: Disable PROM access on init. + +On older cards ( + +drm/nv04: Enable context switching on PFIFO init. + +Fixes a lockup when coming back from suspend. + +Signed-off-by: Francisco Jerez + +drm/nouveau: fix pcirom vbios shadow breakage from acpi rom patch + +On nv50 it became impossible to attempt a PCI ROM shadow of the VBIOS, +which will break some setups. + +This patch also removes the different ordering of shadow methods for +pre-nv50 chipsets. The reason for the different ordering was paranoia, +but it should hopefully be OK to try shadowing PRAMIN first. + +Signed-off-by: Ben Skeggs + +drm/nv50: fix RAMHT size + +Signed-off-by: Ben Skeggs + +drm/nouveau: remove quirk to fabricate DVI-A output on DCB 1.5 boards + +Signed-off-by: Ben Skeggs + +drm/nouveau: support fetching LVDS EDID from ACPI + +Based on a patch from Matthew Garrett. 
+ +Signed-off-by: Ben Skeggs + +drm/nv50: fix regression that break LVDS in some places + +A previous commit started additionally using the SOR link when trying to +match the correct output script. However, we never fill in this field +for LVDS so we can never match a script at all. + +Signed-off-by: Ben Skeggs +--- + drivers/gpu/drm/nouveau/Makefile | 2 +- + drivers/gpu/drm/nouveau/nouveau_acpi.c | 38 +++- + drivers/gpu/drm/nouveau/nouveau_bios.c | 206 +++++++++------ + drivers/gpu/drm/nouveau/nouveau_bios.h | 2 + + drivers/gpu/drm/nouveau/nouveau_bo.c | 9 +- + drivers/gpu/drm/nouveau/nouveau_channel.c | 5 - + drivers/gpu/drm/nouveau/nouveau_connector.c | 281 ++++++++++---------- + drivers/gpu/drm/nouveau/nouveau_connector.h | 4 +- + drivers/gpu/drm/nouveau/nouveau_dma.c | 8 +- + drivers/gpu/drm/nouveau/nouveau_dp.c | 24 ++- + drivers/gpu/drm/nouveau/nouveau_drv.c | 26 +-- + drivers/gpu/drm/nouveau/nouveau_drv.h | 89 ++----- + drivers/gpu/drm/nouveau/nouveau_encoder.h | 10 +- + drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5 +- + drivers/gpu/drm/nouveau/nouveau_fence.c | 31 +-- + drivers/gpu/drm/nouveau/nouveau_gem.c | 11 +- + drivers/gpu/drm/nouveau/nouveau_grctx.c | 160 ----------- + drivers/gpu/drm/nouveau/nouveau_mem.c | 261 ++----------------- + drivers/gpu/drm/nouveau/nouveau_notifier.c | 30 +-- + drivers/gpu/drm/nouveau/nouveau_object.c | 105 +++----- + drivers/gpu/drm/nouveau/nouveau_reg.h | 1 + + drivers/gpu/drm/nouveau/nouveau_sgdma.c | 46 +--- + drivers/gpu/drm/nouveau/nouveau_state.c | 172 ++++-------- + drivers/gpu/drm/nouveau/nv04_dac.c | 37 ++- + drivers/gpu/drm/nouveau/nv04_dfp.c | 12 +- + drivers/gpu/drm/nouveau/nv04_display.c | 23 ++- + drivers/gpu/drm/nouveau/nv04_fifo.c | 20 +- + drivers/gpu/drm/nouveau/nv04_graph.c | 5 +- + drivers/gpu/drm/nouveau/nv04_instmem.c | 21 +- + drivers/gpu/drm/nouveau/nv04_mc.c | 4 + + drivers/gpu/drm/nouveau/nv04_tv.c | 8 +- + drivers/gpu/drm/nouveau/nv10_fifo.c | 10 - + drivers/gpu/drm/nouveau/nv17_tv.c | 45 +++- + 
drivers/gpu/drm/nouveau/nv20_graph.c | 96 ++++--- + drivers/gpu/drm/nouveau/nv40_fifo.c | 8 - + drivers/gpu/drm/nouveau/nv40_graph.c | 58 ++--- + drivers/gpu/drm/nouveau/nv50_crtc.c | 42 +--- + drivers/gpu/drm/nouveau/nv50_dac.c | 43 ++- + drivers/gpu/drm/nouveau/nv50_display.c | 385 ++++++++++++++++----------- + drivers/gpu/drm/nouveau/nv50_fifo.c | 126 ++++------ + drivers/gpu/drm/nouveau/nv50_graph.c | 86 +++---- + drivers/gpu/drm/nouveau/nv50_instmem.c | 61 ++--- + drivers/gpu/drm/nouveau/nv50_sor.c | 105 ++++---- + 43 files changed, 1123 insertions(+), 1598 deletions(-) + delete mode 100644 drivers/gpu/drm/nouveau/nouveau_grctx.c + +diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile +index acd31ed..4a1db73 100644 +--- a/drivers/gpu/drm/nouveau/Makefile ++++ b/drivers/gpu/drm/nouveau/Makefile +@@ -9,7 +9,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ + nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ + nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ + nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ +- nouveau_dp.o nouveau_grctx.o \ ++ nouveau_dp.o \ + nv04_timer.o \ + nv04_mc.o nv40_mc.o nv50_mc.o \ + nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \ +diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c +index d4bcca8..c17a055 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c ++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c +@@ -3,6 +3,7 @@ + #include + #include + #include ++#include + + #include "drmP.h" + #include "drm.h" +@@ -11,6 +12,7 @@ + #include "nouveau_drv.h" + #include "nouveau_drm.h" + #include "nv50_display.h" ++#include "nouveau_connector.h" + + #include + +@@ -42,7 +44,7 @@ static const char nouveau_dsm_muid[] = { + 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, + }; + +-static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result) ++static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t 
*result) + { + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; +@@ -259,3 +261,37 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) + { + return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); + } ++ ++int ++nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) ++{ ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); ++ struct acpi_device *acpidev; ++ acpi_handle handle; ++ int type, ret; ++ void *edid; ++ ++ switch (connector->connector_type) { ++ case DRM_MODE_CONNECTOR_LVDS: ++ case DRM_MODE_CONNECTOR_eDP: ++ type = ACPI_VIDEO_DISPLAY_LCD; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); ++ if (!handle) ++ return -ENODEV; ++ ++ ret = acpi_bus_get_device(handle, &acpidev); ++ if (ret) ++ return -ENODEV; ++ ++ ret = acpi_video_get_edid(acpidev, type, -1, &edid); ++ if (ret < 0) ++ return ret; ++ ++ nv_connector->edid = edid; ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c +index fc924b6..0eb1b5a 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bios.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c +@@ -203,36 +203,26 @@ struct methods { + const bool rw; + }; + +-static struct methods nv04_methods[] = { +- { "PROM", load_vbios_prom, false }, +- { "PRAMIN", load_vbios_pramin, true }, +- { "PCIROM", load_vbios_pci, true }, +-}; +- +-static struct methods nv50_methods[] = { +- { "ACPI", load_vbios_acpi, true }, ++static struct methods shadow_methods[] = { + { "PRAMIN", load_vbios_pramin, true }, + { "PROM", load_vbios_prom, false }, + { "PCIROM", load_vbios_pci, true }, ++ { "ACPI", load_vbios_acpi, true }, + }; + +-#define METHODCNT 3 +- + static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct methods *methods; +- int i; ++ const int nr_methods = 
ARRAY_SIZE(shadow_methods); ++ struct methods *methods = shadow_methods; + int testscore = 3; +- int scores[METHODCNT]; ++ int scores[nr_methods], i; + + if (nouveau_vbios) { +- methods = nv04_methods; +- for (i = 0; i < METHODCNT; i++) ++ for (i = 0; i < nr_methods; i++) + if (!strcasecmp(nouveau_vbios, methods[i].desc)) + break; + +- if (i < METHODCNT) { ++ if (i < nr_methods) { + NV_INFO(dev, "Attempting to use BIOS image from %s\n", + methods[i].desc); + +@@ -244,12 +234,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) + NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); + } + +- if (dev_priv->card_type < NV_50) +- methods = nv04_methods; +- else +- methods = nv50_methods; +- +- for (i = 0; i < METHODCNT; i++) { ++ for (i = 0; i < nr_methods; i++) { + NV_TRACE(dev, "Attempting to load BIOS image from %s\n", + methods[i].desc); + data[0] = data[1] = 0; /* avoid reuse of previous image */ +@@ -260,7 +245,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) + } + + while (--testscore > 0) { +- for (i = 0; i < METHODCNT; i++) { ++ for (i = 0; i < nr_methods; i++) { + if (scores[i] == testscore) { + NV_TRACE(dev, "Using BIOS image from %s\n", + methods[i].desc); +@@ -935,7 +920,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); +- return -EINVAL; ++ return len; + } + + configval = ROM32(bios->data[offset + 11 + config * 4]); +@@ -1037,7 +1022,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); +- return -EINVAL; ++ return len; + } + + freq = ROM16(bios->data[offset + 12 + config * 2]); +@@ -1209,7 +1194,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + dpe = nouveau_bios_dp_table(dev, dcb, &dummy); + if (!dpe) { + NV_ERROR(dev, "0x%04X: INIT_3A: 
no encoder table!!\n", offset); +- return -EINVAL; ++ return 3; + } + + switch (cond) { +@@ -1233,12 +1218,16 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + int ret; + + auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index); +- if (!auxch) +- return -ENODEV; ++ if (!auxch) { ++ NV_ERROR(dev, "0x%04X: couldn't get auxch\n", offset); ++ return 3; ++ } + + ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1); +- if (ret) +- return ret; ++ if (ret) { ++ NV_ERROR(dev, "0x%04X: auxch rd fail: %d\n", offset, ret); ++ return 3; ++ } + + if (cond & 1) + iexec->execute = false; +@@ -1407,7 +1396,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); +- return -EINVAL; ++ return len; + } + + freq = ROM32(bios->data[offset + 11 + config * 4]); +@@ -1467,6 +1456,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + * "mask n" and OR it with "data n" before writing it back to the device + */ + ++ struct drm_device *dev = bios->dev; + uint8_t i2c_index = bios->data[offset + 1]; + uint8_t i2c_address = bios->data[offset + 2] >> 1; + uint8_t count = bios->data[offset + 3]; +@@ -1481,9 +1471,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + "Count: 0x%02X\n", + offset, i2c_index, i2c_address, count); + +- chan = init_i2c_device_find(bios->dev, i2c_index); +- if (!chan) +- return -ENODEV; ++ chan = init_i2c_device_find(dev, i2c_index); ++ if (!chan) { ++ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); ++ return len; ++ } + + for (i = 0; i < count; i++) { + uint8_t reg = bios->data[offset + 4 + i * 3]; +@@ -1494,8 +1486,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, + I2C_SMBUS_READ, reg, + I2C_SMBUS_BYTE_DATA, &val); +- if (ret < 0) +- return ret; ++ if (ret < 0) { ++ 
NV_ERROR(dev, "0x%04X: i2c rd fail: %d\n", offset, ret); ++ return len; ++ } + + BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " + "Mask: 0x%02X, Data: 0x%02X\n", +@@ -1509,8 +1503,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, + I2C_SMBUS_WRITE, reg, + I2C_SMBUS_BYTE_DATA, &val); +- if (ret < 0) +- return ret; ++ if (ret < 0) { ++ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); ++ return len; ++ } + } + + return len; +@@ -1535,6 +1531,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + * "DCB I2C table entry index", set the register to "data n" + */ + ++ struct drm_device *dev = bios->dev; + uint8_t i2c_index = bios->data[offset + 1]; + uint8_t i2c_address = bios->data[offset + 2] >> 1; + uint8_t count = bios->data[offset + 3]; +@@ -1549,9 +1546,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + "Count: 0x%02X\n", + offset, i2c_index, i2c_address, count); + +- chan = init_i2c_device_find(bios->dev, i2c_index); +- if (!chan) +- return -ENODEV; ++ chan = init_i2c_device_find(dev, i2c_index); ++ if (!chan) { ++ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); ++ return len; ++ } + + for (i = 0; i < count; i++) { + uint8_t reg = bios->data[offset + 4 + i * 2]; +@@ -1568,8 +1567,10 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, + I2C_SMBUS_WRITE, reg, + I2C_SMBUS_BYTE_DATA, &val); +- if (ret < 0) +- return ret; ++ if (ret < 0) { ++ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); ++ return len; ++ } + } + + return len; +@@ -1592,6 +1593,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + * address" on the I2C bus given by "DCB I2C table entry index" + */ + ++ struct drm_device *dev = bios->dev; + uint8_t i2c_index = bios->data[offset + 1]; + uint8_t i2c_address 
= bios->data[offset + 2] >> 1; + uint8_t count = bios->data[offset + 3]; +@@ -1599,7 +1601,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + struct nouveau_i2c_chan *chan; + struct i2c_msg msg; + uint8_t data[256]; +- int i; ++ int ret, i; + + if (!iexec->execute) + return len; +@@ -1608,9 +1610,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + "Count: 0x%02X\n", + offset, i2c_index, i2c_address, count); + +- chan = init_i2c_device_find(bios->dev, i2c_index); +- if (!chan) +- return -ENODEV; ++ chan = init_i2c_device_find(dev, i2c_index); ++ if (!chan) { ++ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); ++ return len; ++ } + + for (i = 0; i < count; i++) { + data[i] = bios->data[offset + 4 + i]; +@@ -1623,8 +1627,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + msg.flags = 0; + msg.len = count; + msg.buf = data; +- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return -EIO; ++ ret = i2c_transfer(&chan->adapter, &msg, 1); ++ if (ret != 1) { ++ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); ++ return len; ++ } + } + + return len; +@@ -1648,6 +1655,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + * used -- see get_tmds_index_reg() + */ + ++ struct drm_device *dev = bios->dev; + uint8_t mlv = bios->data[offset + 1]; + uint32_t tmdsaddr = bios->data[offset + 2]; + uint8_t mask = bios->data[offset + 3]; +@@ -1662,8 +1670,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + offset, mlv, tmdsaddr, mask, data); + + reg = get_tmds_index_reg(bios->dev, mlv); +- if (!reg) +- return -EINVAL; ++ if (!reg) { ++ NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset); ++ return 5; ++ } + + bios_wr32(bios, reg, + tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); +@@ -1693,6 +1703,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, + * register is used -- see get_tmds_index_reg() + */ + ++ struct 
drm_device *dev = bios->dev; + uint8_t mlv = bios->data[offset + 1]; + uint8_t count = bios->data[offset + 2]; + int len = 3 + count * 2; +@@ -1706,8 +1717,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, + offset, mlv, count); + + reg = get_tmds_index_reg(bios->dev, mlv); +- if (!reg) +- return -EINVAL; ++ if (!reg) { ++ NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset); ++ return len; ++ } + + for (i = 0; i < count; i++) { + uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; +@@ -2146,7 +2159,8 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + /* no iexec->execute check by design */ + + pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19); +- bios_wr32(bios, NV_PBUS_PCI_NV_19, 0); ++ bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19 & ~0xf00); ++ + bios_wr32(bios, reg, value1); + + udelay(10); +@@ -2182,7 +2196,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, + uint32_t reg, data; + + if (bios->major_version > 2) +- return -ENODEV; ++ return 0; + + bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( + bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); +@@ -2237,7 +2251,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset, + int clock; + + if (bios->major_version > 2) +- return -ENODEV; ++ return 0; + + clock = ROM16(bios->data[meminitoffs + 4]) * 10; + setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); +@@ -2270,7 +2284,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset, + uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); + + if (bios->major_version > 2) +- return -ENODEV; ++ return 0; + + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, + NV_CIO_CRE_SCRATCH4__INDEX, cr3c); +@@ -2815,7 +2829,7 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + if (dev_priv->card_type != NV_50) { + NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n"); +- return -ENODEV; ++ return 1; + } + + if (!iexec->execute) +@@ -2887,10 +2901,7 @@ 
init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, + uint8_t index; + int i; + +- +- if (!iexec->execute) +- return len; +- ++ /* critical! to know the length of the opcode */; + if (!blocklen) { + NV_ERROR(bios->dev, + "0x%04X: Zero block length - has the M table " +@@ -2898,6 +2909,9 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, + return -EINVAL; + } + ++ if (!iexec->execute) ++ return len; ++ + strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; + index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg]; + +@@ -3079,14 +3093,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + if (!bios->display.output) { + NV_ERROR(dev, "INIT_AUXCH: no active output\n"); +- return -EINVAL; ++ return len; + } + + auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); + if (!auxch) { + NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", + bios->display.output->i2c_index); +- return -ENODEV; ++ return len; + } + + if (!iexec->execute) +@@ -3099,7 +3113,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); + if (ret) { + NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); +- return ret; ++ return len; + } + + data &= bios->data[offset + 0]; +@@ -3108,7 +3122,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); + if (ret) { + NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); +- return ret; ++ return len; + } + } + +@@ -3138,14 +3152,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + if (!bios->display.output) { + NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); +- return -EINVAL; ++ return len; + } + + auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); + if (!auxch) { + NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", + bios->display.output->i2c_index); +- 
return -ENODEV; ++ return len; + } + + if (!iexec->execute) +@@ -3156,7 +3170,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); + if (ret) { + NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); +- return ret; ++ return len; + } + } + +@@ -5166,10 +5180,14 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi + bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; + bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; + bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; +- bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; +- bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; +- bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; +- bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; ++ if (bios->data[legacy_i2c_offset + 4]) ++ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; ++ if (bios->data[legacy_i2c_offset + 5]) ++ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; ++ if (bios->data[legacy_i2c_offset + 6]) ++ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; ++ if (bios->data[legacy_i2c_offset + 7]) ++ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; + + if (bmplength > 74) { + bios->fmaxvco = ROM32(bmp[67]); +@@ -5604,9 +5622,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, + if (conf & 0x4 || conf & 0x8) + entry->lvdsconf.use_power_scripts = true; + } else { +- mask = ~0x5; ++ mask = ~0x7; ++ if (conf & 0x2) ++ entry->lvdsconf.use_acpi_for_edid = true; + if (conf & 0x4) + entry->lvdsconf.use_power_scripts = true; ++ entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; + } + if (conf & mask) { + /* +@@ -5721,13 +5742,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, + case OUTPUT_TV: + entry->tvconf.has_component_output = false; + break; +- case OUTPUT_TMDS: +- /* +- * Invent a 
DVI-A output, by copying the fields of the DVI-D +- * output; reported to work by math_b on an NV20(!). +- */ +- fabricate_vga_output(dcb, entry->i2c_index, entry->heads); +- break; + case OUTPUT_LVDS: + if ((conn & 0x00003f00) != 0x10) + entry->lvdsconf.use_straps_for_mode = true; +@@ -5808,6 +5822,31 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb) + dcb->entries = newentries; + } + ++static bool ++apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) ++{ ++ /* Dell Precision M6300 ++ * DCB entry 2: 02025312 00000010 ++ * DCB entry 3: 02026312 00000020 ++ * ++ * Identical, except apparently a different connector on a ++ * different SOR link. Not a clue how we're supposed to know ++ * which one is in use if it even shares an i2c line... ++ * ++ * Ignore the connector on the second SOR link to prevent ++ * nasty problems until this is sorted (assuming it's not a ++ * VBIOS bug). ++ */ ++ if ((dev->pdev->device == 0x040d) && ++ (dev->pdev->subsystem_vendor == 0x1028) && ++ (dev->pdev->subsystem_device == 0x019b)) { ++ if (*conn == 0x02026312 && *conf == 0x00000020) ++ return false; ++ } ++ ++ return true; ++} ++ + static int + parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + { +@@ -5941,6 +5980,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + if ((connection & 0x0000000f) == 0x0000000f) + continue; + ++ if (!apply_dcb_encoder_quirks(dev, i, &connection, &config)) ++ continue; ++ + NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n", + dcb->entries, connection, config); + +diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h +index adf4ec2..cc52aec 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bios.h ++++ b/drivers/gpu/drm/nouveau/nouveau_bios.h +@@ -81,6 +81,7 @@ struct dcb_connector_table_entry { + enum dcb_connector_type type; + uint8_t index2; + uint8_t gpio_tag; ++ void *drm; + }; + + struct 
dcb_connector_table { +@@ -117,6 +118,7 @@ struct dcb_entry { + struct { + struct sor_conf sor; + bool use_straps_for_mode; ++ bool use_acpi_for_edid; + bool use_power_scripts; + } lvdsconf; + struct { +diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c +index 6f3c195..d8c341d 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bo.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c +@@ -461,9 +461,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, + return ret; + + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, +- evict, no_wait_reserve, no_wait_gpu, new_mem); +- if (nvbo->channel && nvbo->channel != chan) +- ret = nouveau_fence_wait(fence, NULL, false, false); ++ evict || (nvbo->channel && ++ nvbo->channel != chan), ++ no_wait_reserve, no_wait_gpu, new_mem); + nouveau_fence_unref((void *)&fence); + return ret; + } +@@ -711,8 +711,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, + return ret; + + /* Software copy if the card isn't up and running yet. 
*/ +- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || +- !dev_priv->channel) { ++ if (!dev_priv->channel) { + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + goto out; + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c +index 1fc57ef..e952c3b 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_channel.c ++++ b/drivers/gpu/drm/nouveau/nouveau_channel.c +@@ -257,9 +257,7 @@ nouveau_channel_free(struct nouveau_channel *chan) + nouveau_debugfs_channel_fini(chan); + + /* Give outstanding push buffers a chance to complete */ +- spin_lock_irqsave(&chan->fence.lock, flags); + nouveau_fence_update(chan); +- spin_unlock_irqrestore(&chan->fence.lock, flags); + if (chan->fence.sequence != chan->fence.sequence_ack) { + struct nouveau_fence *fence = NULL; + +@@ -368,8 +366,6 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, + struct nouveau_channel *chan; + int ret; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- + if (dev_priv->engine.graph.accel_blocked) + return -ENODEV; + +@@ -418,7 +414,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, + struct drm_nouveau_channel_free *cfree = data; + struct nouveau_channel *chan; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); + + nouveau_channel_free(chan); +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index 9a61f3c..2914dd9 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -236,20 +236,6 @@ nouveau_connector_detect(struct drm_connector *connector) + struct nouveau_i2c_chan *i2c; + int type, flags; + +- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS) +- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); +- if (nv_encoder && nv_connector->native_mode) { +- unsigned status = connector_status_connected; +- +-#if 
defined(CONFIG_ACPI_BUTTON) || \ +- (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) +- if (!nouveau_ignorelid && !acpi_lid_open()) +- status = connector_status_unknown; +-#endif +- nouveau_connector_set_encoder(connector, nv_encoder); +- return status; +- } +- + /* Cleanup the previous EDID block. */ + if (nv_connector->edid) { + drm_mode_connector_update_edid_property(connector, NULL); +@@ -321,6 +307,85 @@ detect_analog: + return connector_status_disconnected; + } + ++static enum drm_connector_status ++nouveau_connector_detect_lvds(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); ++ struct nouveau_encoder *nv_encoder = NULL; ++ enum drm_connector_status status = connector_status_disconnected; ++ ++ /* Cleanup the previous EDID block. */ ++ if (nv_connector->edid) { ++ drm_mode_connector_update_edid_property(connector, NULL); ++ kfree(nv_connector->edid); ++ nv_connector->edid = NULL; ++ } ++ ++ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); ++ if (!nv_encoder) ++ return connector_status_disconnected; ++ ++ /* Try retrieving EDID via DDC */ ++ if (!dev_priv->vbios.fp_no_ddc) { ++ status = nouveau_connector_detect(connector); ++ if (status == connector_status_connected) ++ goto out; ++ } ++ ++ /* On some laptops (Sony, i'm looking at you) there appears to ++ * be no direct way of accessing the panel's EDID. The only ++ * option available to us appears to be to ask ACPI for help.. 
++ * ++ * It's important this check's before trying straps, one of the ++ * said manufacturer's laptops are configured in such a way ++ * the nouveau decides an entry in the VBIOS FP mode table is ++ * valid - it's not (rh#613284) ++ */ ++ if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { ++ if (!nouveau_acpi_edid(dev, connector)) { ++ status = connector_status_connected; ++ goto out; ++ } ++ } ++ ++ /* If no EDID found above, and the VBIOS indicates a hardcoded ++ * modeline is avalilable for the panel, set it as the panel's ++ * native mode and exit. ++ */ ++ if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc || ++ nv_encoder->dcb->lvdsconf.use_straps_for_mode)) { ++ status = connector_status_connected; ++ goto out; ++ } ++ ++ /* Still nothing, some VBIOS images have a hardcoded EDID block ++ * stored for the panel stored in them. ++ */ ++ if (!dev_priv->vbios.fp_no_ddc) { ++ struct edid *edid = ++ (struct edid *)nouveau_bios_embedded_edid(dev); ++ if (edid) { ++ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); ++ *(nv_connector->edid) = *edid; ++ status = connector_status_connected; ++ } ++ } ++ ++out: ++#if defined(CONFIG_ACPI_BUTTON) || \ ++ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) ++ if (status == connector_status_connected && ++ !nouveau_ignorelid && !acpi_lid_open()) ++ status = connector_status_unknown; ++#endif ++ ++ drm_mode_connector_update_edid_property(connector, nv_connector->edid); ++ nouveau_connector_set_encoder(connector, nv_encoder); ++ return status; ++} ++ + static void + nouveau_connector_force(struct drm_connector *connector) + { +@@ -534,21 +599,27 @@ static int + nouveau_connector_get_modes(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + int ret = 0; + +- /* If we're 
not LVDS, destroy the previous native mode, the attached +- * monitor could have changed. ++ /* destroy the native mode, the attached monitor could have changed. + */ +- if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS && +- nv_connector->native_mode) { ++ if (nv_connector->native_mode) { + drm_mode_destroy(dev, nv_connector->native_mode); + nv_connector->native_mode = NULL; + } + + if (nv_connector->edid) + ret = drm_add_edid_modes(connector, nv_connector->edid); ++ else ++ if (nv_encoder->dcb->type == OUTPUT_LVDS && ++ (nv_encoder->dcb->lvdsconf.use_straps_for_mode || ++ dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { ++ nv_connector->native_mode = drm_mode_create(dev); ++ nouveau_bios_fp_mode(dev, nv_connector->native_mode); ++ } + + /* Find the native mode if this is a digital panel, if we didn't + * find any modes through DDC previously add the native mode to +@@ -569,7 +640,8 @@ nouveau_connector_get_modes(struct drm_connector *connector) + ret = get_slave_funcs(nv_encoder)-> + get_modes(to_drm_encoder(nv_encoder), connector); + +- if (nv_encoder->dcb->type == OUTPUT_LVDS) ++ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS || ++ nv_connector->dcb->type == DCB_CONNECTOR_eDP) + ret += nouveau_connector_scaler_modes_add(connector); + + return ret; +@@ -662,148 +734,74 @@ nouveau_connector_funcs = { + .force = nouveau_connector_force + }; + +-static int +-nouveau_connector_create_lvds(struct drm_device *dev, +- struct drm_connector *connector) +-{ +- struct nouveau_connector *nv_connector = nouveau_connector(connector); +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_i2c_chan *i2c = NULL; +- struct nouveau_encoder *nv_encoder; +- struct drm_display_mode native, *mode, *temp; +- bool dummy, if_is_24bit = false; +- int ret, flags; +- +- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); +- if (!nv_encoder) +- return -ENODEV; +- +- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit); +- if (ret) 
{ +- NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n"); +- return ret; +- } +- nv_connector->use_dithering = !if_is_24bit; +- +- /* Firstly try getting EDID over DDC, if allowed and I2C channel +- * is available. +- */ +- if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) +- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); +- +- if (i2c) { +- nouveau_connector_ddc_prepare(connector, &flags); +- nv_connector->edid = drm_get_edid(connector, &i2c->adapter); +- nouveau_connector_ddc_finish(connector, flags); +- } +- +- /* If no EDID found above, and the VBIOS indicates a hardcoded +- * modeline is avalilable for the panel, set it as the panel's +- * native mode and exit. +- */ +- if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && +- (nv_encoder->dcb->lvdsconf.use_straps_for_mode || +- dev_priv->vbios.fp_no_ddc)) { +- nv_connector->native_mode = drm_mode_duplicate(dev, &native); +- goto out; +- } +- +- /* Still nothing, some VBIOS images have a hardcoded EDID block +- * stored for the panel stored in them. +- */ +- if (!nv_connector->edid && !nv_connector->native_mode && +- !dev_priv->vbios.fp_no_ddc) { +- struct edid *edid = +- (struct edid *)nouveau_bios_embedded_edid(dev); +- if (edid) { +- nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); +- *(nv_connector->edid) = *edid; +- } +- } +- +- if (!nv_connector->edid) +- goto out; +- +- /* We didn't find/use a panel mode from the VBIOS, so parse the EDID +- * block and look for the preferred mode there. +- */ +- ret = drm_add_edid_modes(connector, nv_connector->edid); +- if (ret == 0) +- goto out; +- nv_connector->detected_encoder = nv_encoder; +- nv_connector->native_mode = nouveau_connector_native_mode(connector); +- list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) +- drm_mode_remove(connector, mode); +- +-out: +- if (!nv_connector->native_mode) { +- NV_ERROR(dev, "LVDS present in DCB table, but couldn't " +- "determine its native mode. 
Disabling.\n"); +- return -ENODEV; +- } +- +- drm_mode_connector_update_edid_property(connector, nv_connector->edid); +- return 0; +-} ++static const struct drm_connector_funcs ++nouveau_connector_funcs_lvds = { ++ .dpms = drm_helper_connector_dpms, ++ .save = NULL, ++ .restore = NULL, ++ .detect = nouveau_connector_detect_lvds, ++ .destroy = nouveau_connector_destroy, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .set_property = nouveau_connector_set_property, ++ .force = nouveau_connector_force ++}; + +-int +-nouveau_connector_create(struct drm_device *dev, +- struct dcb_connector_table_entry *dcb) ++struct drm_connector * ++nouveau_connector_create(struct drm_device *dev, int index) + { ++ const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_connector *nv_connector = NULL; ++ struct dcb_connector_table_entry *dcb = NULL; + struct drm_connector *connector; +- struct drm_encoder *encoder; +- int ret, type; ++ int type, ret = 0; + + NV_DEBUG_KMS(dev, "\n"); + ++ if (index >= dev_priv->vbios.dcb.connector.entries) ++ return ERR_PTR(-EINVAL); ++ ++ dcb = &dev_priv->vbios.dcb.connector.entry[index]; ++ if (dcb->drm) ++ return dcb->drm; ++ + switch (dcb->type) { +- case DCB_CONNECTOR_NONE: +- return 0; + case DCB_CONNECTOR_VGA: +- NV_INFO(dev, "Detected a VGA connector\n"); + type = DRM_MODE_CONNECTOR_VGA; + break; + case DCB_CONNECTOR_TV_0: + case DCB_CONNECTOR_TV_1: + case DCB_CONNECTOR_TV_3: +- NV_INFO(dev, "Detected a TV connector\n"); + type = DRM_MODE_CONNECTOR_TV; + break; + case DCB_CONNECTOR_DVI_I: +- NV_INFO(dev, "Detected a DVI-I connector\n"); + type = DRM_MODE_CONNECTOR_DVII; + break; + case DCB_CONNECTOR_DVI_D: +- NV_INFO(dev, "Detected a DVI-D connector\n"); + type = DRM_MODE_CONNECTOR_DVID; + break; + case DCB_CONNECTOR_HDMI_0: + case DCB_CONNECTOR_HDMI_1: +- NV_INFO(dev, "Detected a HDMI connector\n"); + type = DRM_MODE_CONNECTOR_HDMIA; + break; + 
case DCB_CONNECTOR_LVDS: +- NV_INFO(dev, "Detected a LVDS connector\n"); + type = DRM_MODE_CONNECTOR_LVDS; ++ funcs = &nouveau_connector_funcs_lvds; + break; + case DCB_CONNECTOR_DP: +- NV_INFO(dev, "Detected a DisplayPort connector\n"); + type = DRM_MODE_CONNECTOR_DisplayPort; + break; + case DCB_CONNECTOR_eDP: +- NV_INFO(dev, "Detected an eDP connector\n"); + type = DRM_MODE_CONNECTOR_eDP; + break; + default: + NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type); +- return -EINVAL; ++ return ERR_PTR(-EINVAL); + } + + nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); + if (!nv_connector) +- return -ENOMEM; ++ return ERR_PTR(-ENOMEM); + nv_connector->dcb = dcb; + connector = &nv_connector->base; + +@@ -811,27 +809,21 @@ nouveau_connector_create(struct drm_device *dev, + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + +- drm_connector_init(dev, connector, &nouveau_connector_funcs, type); ++ drm_connector_init(dev, connector, funcs, type); + drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); + +- /* attach encoders */ +- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); +- +- if (nv_encoder->dcb->connector != dcb->index) +- continue; +- +- if (get_slave_funcs(nv_encoder)) +- get_slave_funcs(nv_encoder)->create_resources(encoder, connector); ++ /* Check if we need dithering enabled */ ++ if (dcb->type == DCB_CONNECTOR_LVDS) { ++ bool dummy, is_24bit = false; + +- drm_mode_connector_attach_encoder(connector, encoder); +- } ++ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit); ++ if (ret) { ++ NV_ERROR(dev, "Error parsing LVDS table, disabling " ++ "LVDS\n"); ++ goto fail; ++ } + +- if (!connector->encoder_ids[0]) { +- NV_WARN(dev, " no encoders, ignoring\n"); +- drm_connector_cleanup(connector); +- kfree(connector); +- return 0; ++ nv_connector->use_dithering = !is_24bit; + } + + /* Init DVI-I specific 
properties */ +@@ -841,9 +833,6 @@ nouveau_connector_create(struct drm_device *dev, + drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); + } + +- if (dcb->type != DCB_CONNECTOR_LVDS) +- nv_connector->use_dithering = false; +- + switch (dcb->type) { + case DCB_CONNECTOR_VGA: + if (dev_priv->card_type >= NV_50) { +@@ -871,14 +860,12 @@ nouveau_connector_create(struct drm_device *dev, + } + + drm_sysfs_connector_add(connector); ++ dcb->drm = connector; ++ return dcb->drm; + +- if (dcb->type == DCB_CONNECTOR_LVDS) { +- ret = nouveau_connector_create_lvds(dev, connector); +- if (ret) { +- connector->funcs->destroy(connector); +- return ret; +- } +- } ++fail: ++ drm_connector_cleanup(connector); ++ kfree(connector); ++ return ERR_PTR(ret); + +- return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h +index 4ef38ab..1ce3d91 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.h ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.h +@@ -49,7 +49,7 @@ static inline struct nouveau_connector *nouveau_connector( + return container_of(con, struct nouveau_connector, base); + } + +-int nouveau_connector_create(struct drm_device *, +- struct dcb_connector_table_entry *); ++struct drm_connector * ++nouveau_connector_create(struct drm_device *, int index); + + #endif /* __NOUVEAU_CONNECTOR_H__ */ +diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c +index 65c441a..2e3c6ca 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_dma.c ++++ b/drivers/gpu/drm/nouveau/nouveau_dma.c +@@ -92,11 +92,9 @@ nouveau_dma_init(struct nouveau_channel *chan) + return ret; + + /* Map M2MF notifier object - fbcon. 
*/ +- if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- ret = nouveau_bo_map(chan->notifier_bo); +- if (ret) +- return ret; +- } ++ ret = nouveau_bo_map(chan->notifier_bo); ++ if (ret) ++ return ret; + + /* Insert NOPS for NOUVEAU_DMA_SKIPS */ + ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); +diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c +index deeb21c..184bc95 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_dp.c ++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c +@@ -271,12 +271,26 @@ nouveau_dp_link_train(struct drm_encoder *encoder) + { + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); +- uint8_t config[4]; +- uint8_t status[3]; ++ struct bit_displayport_encoder_table *dpe; ++ int dpe_headerlen; ++ uint8_t config[4], status[3]; + bool cr_done, cr_max_vs, eq_done; + int ret = 0, i, tries, voltage; + + NV_DEBUG_KMS(dev, "link training!!\n"); ++ ++ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); ++ if (!dpe) { ++ NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); ++ return false; ++ } ++ ++ if (dpe->script0) { ++ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); ++ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), ++ nv_encoder->dcb); ++ } ++ + train: + cr_done = eq_done = false; + +@@ -403,6 +417,12 @@ stop: + } + } + ++ if (dpe->script1) { ++ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); ++ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), ++ nv_encoder->dcb); ++ } ++ + return eq_done; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c +index 2737704..b4d958c 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c +@@ -35,10 +35,6 @@ + + #include "drm_pciids.h" + +-MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)"); +-int nouveau_ctxfw = 0; +-module_param_named(ctxfw, 
nouveau_ctxfw, int, 0400); +- + MODULE_PARM_DESC(noagp, "Disable AGP"); + int nouveau_noagp; + module_param_named(noagp, nouveau_noagp, int, 0400); +@@ -56,7 +52,7 @@ int nouveau_vram_pushbuf; + module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); + + MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); +-int nouveau_vram_notify = 1; ++int nouveau_vram_notify = 0; + module_param_named(vram_notify, nouveau_vram_notify, int, 0400); + + MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); +@@ -155,9 +151,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) + struct drm_crtc *crtc; + int ret, i; + +- if (!drm_core_check_feature(dev, DRIVER_MODESET)) +- return -ENODEV; +- + if (pm_state.event == PM_EVENT_PRETHAW) + return 0; + +@@ -257,9 +250,6 @@ nouveau_pci_resume(struct pci_dev *pdev) + struct drm_crtc *crtc; + int ret, i; + +- if (!drm_core_check_feature(dev, DRIVER_MODESET)) +- return -ENODEV; +- + nouveau_fbcon_save_disable_accel(dev); + + NV_INFO(dev, "We're back, enabling device...\n"); +@@ -323,7 +313,6 @@ nouveau_pci_resume(struct pci_dev *pdev) + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); +- int ret; + + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) +@@ -371,7 +360,8 @@ nouveau_pci_resume(struct pci_dev *pdev) + static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | +- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, ++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | ++ DRIVER_MODESET, + .load = nouveau_load, + .firstopen = nouveau_firstopen, + .lastclose = nouveau_lastclose, +@@ -438,16 +428,18 @@ static int __init nouveau_init(void) + nouveau_modeset = 1; + } + +- if (nouveau_modeset == 1) { +- driver.driver_features |= DRIVER_MODESET; +- nouveau_register_dsm_handler(); +- } ++ if (!nouveau_modeset) ++ return 0; + ++ 
nouveau_register_dsm_handler(); + return drm_init(&driver); + } + + static void __exit nouveau_exit(void) + { ++ if (!nouveau_modeset) ++ return; ++ + drm_exit(&driver); + nouveau_unregister_dsm_handler(); + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h +index c697191..51ccd90 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.h ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h +@@ -123,14 +123,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo) + return ioptr; + } + +-struct mem_block { +- struct mem_block *next; +- struct mem_block *prev; +- uint64_t start; +- uint64_t size; +- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ +-}; +- + enum nouveau_flags { + NV_NFORCE = 0x10000000, + NV_NFORCE2 = 0x20000000 +@@ -149,7 +141,7 @@ struct nouveau_gpuobj { + struct list_head list; + + struct nouveau_channel *im_channel; +- struct mem_block *im_pramin; ++ struct drm_mm_node *im_pramin; + struct nouveau_bo *im_backing; + uint32_t im_backing_start; + uint32_t *im_backing_suspend; +@@ -196,7 +188,7 @@ struct nouveau_channel { + struct list_head pending; + uint32_t sequence; + uint32_t sequence_ack; +- uint32_t last_sequence_irq; ++ atomic_t last_sequence_irq; + } fence; + + /* DMA push buffer */ +@@ -206,7 +198,7 @@ struct nouveau_channel { + + /* Notifier memory */ + struct nouveau_bo *notifier_bo; +- struct mem_block *notifier_heap; ++ struct drm_mm notifier_heap; + + /* PFIFO context */ + struct nouveau_gpuobj_ref *ramfc; +@@ -224,7 +216,7 @@ struct nouveau_channel { + + /* Objects */ + struct nouveau_gpuobj_ref *ramin; /* Private instmem */ +- struct mem_block *ramin_heap; /* Private PRAMIN heap */ ++ struct drm_mm ramin_heap; /* Private PRAMIN heap */ + struct nouveau_gpuobj_ref *ramht; /* Hash table */ + struct list_head ramht_refs; /* Objects referenced by RAMHT */ + +@@ -277,8 +269,7 @@ struct nouveau_instmem_engine { + void (*clear)(struct drm_device *, struct nouveau_gpuobj *); + int 
(*bind)(struct drm_device *, struct nouveau_gpuobj *); + int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); +- void (*prepare_access)(struct drm_device *, bool write); +- void (*finish_access)(struct drm_device *); ++ void (*flush)(struct drm_device *); + }; + + struct nouveau_mc_engine { +@@ -303,10 +294,11 @@ struct nouveau_fb_engine { + }; + + struct nouveau_fifo_engine { +- void *priv; +- + int channels; + ++ struct nouveau_gpuobj_ref *playlist[2]; ++ int cur_playlist; ++ + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); + +@@ -339,10 +331,11 @@ struct nouveau_pgraph_object_class { + struct nouveau_pgraph_engine { + struct nouveau_pgraph_object_class *grclass; + bool accel_blocked; +- void *ctxprog; +- void *ctxvals; + int grctx_size; + ++ /* NV2x/NV3x context table (0x400780) */ ++ struct nouveau_gpuobj_ref *ctx_table; ++ + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); + +@@ -500,11 +493,6 @@ enum nouveau_card_type { + + struct drm_nouveau_private { + struct drm_device *dev; +- enum { +- NOUVEAU_CARD_INIT_DOWN, +- NOUVEAU_CARD_INIT_DONE, +- NOUVEAU_CARD_INIT_FAILED +- } init_state; + + /* the card type, takes NV_* as values */ + enum nouveau_card_type card_type; +@@ -533,8 +521,6 @@ struct drm_nouveau_private { + atomic_t validate_sequence; + } ttm; + +- struct fb_info *fbdev_info; +- + int fifo_alloc_count; + struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; + +@@ -595,11 +581,7 @@ struct drm_nouveau_private { + struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; + int vm_vram_pt_nr; + +- struct mem_block *ramin_heap; +- +- /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ +- uint32_t ctx_table_size; +- struct nouveau_gpuobj_ref *ctx_table; ++ struct drm_mm ramin_heap; + + struct list_head gpuobj_list; + +@@ -618,6 +600,11 @@ struct drm_nouveau_private { + struct backlight_device *backlight; + + struct nouveau_channel *evo; ++ struct { ++ struct dcb_entry *dcb; ++ 
u16 script; ++ u32 pclk; ++ } evo_irq; + + struct { + struct dentry *channel_root; +@@ -652,14 +639,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) + return 0; + } + +-#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \ +- struct drm_nouveau_private *nv = dev->dev_private; \ +- if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \ +- NV_ERROR(dev, "called without init\n"); \ +- return -EINVAL; \ +- } \ +-} while (0) +- + #define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \ + struct drm_nouveau_private *nv = dev->dev_private; \ + if (!nouveau_channel_owner(dev, (cl), (id))) { \ +@@ -682,7 +661,6 @@ extern int nouveau_tv_disable; + extern char *nouveau_tv_norm; + extern int nouveau_reg_debug; + extern char *nouveau_vbios; +-extern int nouveau_ctxfw; + extern int nouveau_ignorelid; + extern int nouveau_nofbaccel; + extern int nouveau_noaccel; +@@ -707,15 +685,7 @@ extern bool nouveau_wait_for_idle(struct drm_device *); + extern int nouveau_card_init(struct drm_device *); + + /* nouveau_mem.c */ +-extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, +- uint64_t size); +-extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, +- uint64_t size, int align2, +- struct drm_file *, int tail); +-extern void nouveau_mem_takedown(struct mem_block **heap); +-extern void nouveau_mem_free_block(struct mem_block *); + extern int nouveau_mem_detect(struct drm_device *dev); +-extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); + extern int nouveau_mem_init(struct drm_device *); + extern int nouveau_mem_init_agp(struct drm_device *); + extern void nouveau_mem_close(struct drm_device *); +@@ -857,11 +827,13 @@ void nouveau_register_dsm_handler(void); + void nouveau_unregister_dsm_handler(void); + int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); + bool nouveau_acpi_rom_supported(struct pci_dev *pdev); ++int nouveau_acpi_edid(struct drm_device *, struct drm_connector *); + #else + 
static inline void nouveau_register_dsm_handler(void) {} + static inline void nouveau_unregister_dsm_handler(void) {} + static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } + static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } ++static inline int nouveau_acpi_edid(struct drm_device *, struct drm_connector *) { return -EINVAL; } + #endif + + /* nouveau_backlight.c */ +@@ -1035,12 +1007,6 @@ extern int nv50_graph_unload_context(struct drm_device *); + extern void nv50_graph_context_switch(struct drm_device *); + extern int nv50_grctx_init(struct nouveau_grctx *); + +-/* nouveau_grctx.c */ +-extern int nouveau_grctx_prog_load(struct drm_device *); +-extern void nouveau_grctx_vals_load(struct drm_device *, +- struct nouveau_gpuobj *); +-extern void nouveau_grctx_fini(struct drm_device *); +- + /* nv04_instmem.c */ + extern int nv04_instmem_init(struct drm_device *); + extern void nv04_instmem_takedown(struct drm_device *); +@@ -1051,8 +1017,7 @@ extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, + extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); + extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); + extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +-extern void nv04_instmem_prepare_access(struct drm_device *, bool write); +-extern void nv04_instmem_finish_access(struct drm_device *); ++extern void nv04_instmem_flush(struct drm_device *); + + /* nv50_instmem.c */ + extern int nv50_instmem_init(struct drm_device *); +@@ -1064,8 +1029,8 @@ extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, + extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); + extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); + extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +-extern void 
nv50_instmem_prepare_access(struct drm_device *, bool write); +-extern void nv50_instmem_finish_access(struct drm_device *); ++extern void nv50_instmem_flush(struct drm_device *); ++extern void nv50_vm_flush(struct drm_device *, int engine); + + /* nv04_mc.c */ + extern int nv04_mc_init(struct drm_device *); +@@ -1088,13 +1053,14 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + + /* nv04_dac.c */ +-extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); ++extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *); + extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder); + extern int nv04_dac_output_offset(struct drm_encoder *encoder); + extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); ++extern bool nv04_dac_in_use(struct drm_encoder *encoder); + + /* nv04_dfp.c */ +-extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry); ++extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *); + extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent); + extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, + int head, bool dl); +@@ -1103,10 +1069,10 @@ extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode); + + /* nv04_tv.c */ + extern int nv04_tv_identify(struct drm_device *dev, int i2c_index); +-extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry); ++extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *); + + /* nv17_tv.c */ +-extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); ++extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *); + + /* nv04_display.c */ + extern int nv04_display_create(struct drm_device *); +@@ -1147,7 +1113,6 @@ extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); + extern int nouveau_fence_flush(void *obj, void *arg); + 
extern void nouveau_fence_unref(void **obj); + extern void *nouveau_fence_ref(void *obj); +-extern void nouveau_fence_handler(struct drm_device *dev, int channel); + + /* nouveau_gem.c */ + extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, +diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h +index e1df820..a1a0d48 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h ++++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h +@@ -38,13 +38,15 @@ struct nouveau_encoder { + struct dcb_entry *dcb; + int or; + ++ /* different to drm_encoder.crtc, this reflects what's ++ * actually programmed on the hw, not the proposed crtc */ ++ struct drm_crtc *crtc; ++ + struct drm_display_mode mode; + int last_dpms; + + struct nv04_output_reg restore; + +- void (*disconnect)(struct nouveau_encoder *encoder); +- + union { + struct { + int mc_unknown; +@@ -71,8 +73,8 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc) + + struct nouveau_connector * + nouveau_encoder_connector_get(struct nouveau_encoder *encoder); +-int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry); +-int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry); ++int nv50_sor_create(struct drm_connector *, struct dcb_entry *); ++int nv50_dac_create(struct drm_connector *, struct dcb_entry *); + + struct bit_displayport_encoder_table { + uint32_t match; +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +index 0a59f96..8415049 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +@@ -337,7 +337,7 @@ static void nouveau_fbcon_output_status_changed(struct drm_fb_helper *fb_helper) + drm_helper_fb_hotplug_event(fb_helper, true); + } + +-int ++static int + nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) + { + struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; +@@ -392,7 
+392,8 @@ int nouveau_fbcon_init(struct drm_device *dev) + dev_priv->nfbdev = nfbdev; + nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; + +- ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4, true); ++ ret = drm_fb_helper_init(dev, &nfbdev->helper, ++ nv_two_heads(dev) ? 2 : 1, 4, true); + if (ret) { + kfree(nfbdev); + return ret; +diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c +index faddf53..813d853 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fence.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c +@@ -67,12 +67,13 @@ nouveau_fence_update(struct nouveau_channel *chan) + if (USE_REFCNT) + sequence = nvchan_rd32(chan, 0x48); + else +- sequence = chan->fence.last_sequence_irq; ++ sequence = atomic_read(&chan->fence.last_sequence_irq); + + if (chan->fence.sequence_ack == sequence) + return; + chan->fence.sequence_ack = sequence; + ++ spin_lock(&chan->fence.lock); + list_for_each_safe(entry, tmp, &chan->fence.pending) { + fence = list_entry(entry, struct nouveau_fence, entry); + +@@ -84,6 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan) + if (sequence == chan->fence.sequence_ack) + break; + } ++ spin_unlock(&chan->fence.lock); + } + + int +@@ -119,7 +121,6 @@ nouveau_fence_emit(struct nouveau_fence *fence) + { + struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private; + struct nouveau_channel *chan = fence->channel; +- unsigned long flags; + int ret; + + ret = RING_SPACE(chan, 2); +@@ -127,9 +128,7 @@ nouveau_fence_emit(struct nouveau_fence *fence) + return ret; + + if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) { +- spin_lock_irqsave(&chan->fence.lock, flags); + nouveau_fence_update(chan); +- spin_unlock_irqrestore(&chan->fence.lock, flags); + + BUG_ON(chan->fence.sequence == + chan->fence.sequence_ack - 1); +@@ -138,9 +137,9 @@ nouveau_fence_emit(struct nouveau_fence *fence) + fence->sequence = ++chan->fence.sequence; + + kref_get(&fence->refcount); +- 
spin_lock_irqsave(&chan->fence.lock, flags); ++ spin_lock(&chan->fence.lock); + list_add_tail(&fence->entry, &chan->fence.pending); +- spin_unlock_irqrestore(&chan->fence.lock, flags); ++ spin_unlock(&chan->fence.lock); + + BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1); + OUT_RING(chan, fence->sequence); +@@ -173,14 +172,11 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg) + { + struct nouveau_fence *fence = nouveau_fence(sync_obj); + struct nouveau_channel *chan = fence->channel; +- unsigned long flags; + + if (fence->signalled) + return true; + +- spin_lock_irqsave(&chan->fence.lock, flags); + nouveau_fence_update(chan); +- spin_unlock_irqrestore(&chan->fence.lock, flags); + return fence->signalled; + } + +@@ -221,27 +217,12 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg) + return 0; + } + +-void +-nouveau_fence_handler(struct drm_device *dev, int channel) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_channel *chan = NULL; +- +- if (channel >= 0 && channel < dev_priv->engine.fifo.channels) +- chan = dev_priv->fifos[channel]; +- +- if (chan) { +- spin_lock_irq(&chan->fence.lock); +- nouveau_fence_update(chan); +- spin_unlock_irq(&chan->fence.lock); +- } +-} +- + int + nouveau_fence_init(struct nouveau_channel *chan) + { + INIT_LIST_HEAD(&chan->fence.pending); + spin_lock_init(&chan->fence.lock); ++ atomic_set(&chan->fence.last_sequence_irq, 0); + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c +index 69c76cf..547f2c2 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c +@@ -137,8 +137,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, + uint32_t flags = 0; + int ret = 0; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- + if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) + dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; + +@@ -577,10 +575,9 @@ 
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, + struct drm_nouveau_gem_pushbuf_bo *bo; + struct nouveau_channel *chan; + struct validate_op op; +- struct nouveau_fence *fence = 0; ++ struct nouveau_fence *fence = NULL; + int i, j, ret = 0, do_reloc = 0; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); + + req->vram_available = dev_priv->fb_aper_free; +@@ -760,8 +757,6 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, + bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); + int ret = -EINVAL; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return ret; +@@ -800,8 +795,6 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, + struct nouveau_bo *nvbo; + int ret = -EINVAL; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return ret; +@@ -827,8 +820,6 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, + struct drm_gem_object *gem; + int ret; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return -EINVAL; +diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c +deleted file mode 100644 +index f731c5f..0000000 +--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c ++++ /dev/null +@@ -1,160 +0,0 @@ +-/* +- * Copyright 2009 Red Hat Inc. 
+- * +- * Permission is hereby granted, free of charge, to any person obtaining a +- * copy of this software and associated documentation files (the "Software"), +- * to deal in the Software without restriction, including without limitation +- * the rights to use, copy, modify, merge, publish, distribute, sublicense, +- * and/or sell copies of the Software, and to permit persons to whom the +- * Software is furnished to do so, subject to the following conditions: +- * +- * The above copyright notice and this permission notice shall be included in +- * all copies or substantial portions of the Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +- * OTHER DEALINGS IN THE SOFTWARE. 
+- * +- * Authors: Ben Skeggs +- */ +- +-#include +-#include +- +-#include "drmP.h" +-#include "nouveau_drv.h" +- +-struct nouveau_ctxprog { +- uint32_t signature; +- uint8_t version; +- uint16_t length; +- uint32_t data[]; +-} __attribute__ ((packed)); +- +-struct nouveau_ctxvals { +- uint32_t signature; +- uint8_t version; +- uint32_t length; +- struct { +- uint32_t offset; +- uint32_t value; +- } data[]; +-} __attribute__ ((packed)); +- +-int +-nouveau_grctx_prog_load(struct drm_device *dev) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; +- const int chipset = dev_priv->chipset; +- const struct firmware *fw; +- const struct nouveau_ctxprog *cp; +- const struct nouveau_ctxvals *cv; +- char name[32]; +- int ret, i; +- +- if (pgraph->accel_blocked) +- return -ENODEV; +- +- if (!pgraph->ctxprog) { +- sprintf(name, "nouveau/nv%02x.ctxprog", chipset); +- ret = request_firmware(&fw, name, &dev->pdev->dev); +- if (ret) { +- NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset); +- return ret; +- } +- +- pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL); +- if (!pgraph->ctxprog) { +- NV_ERROR(dev, "OOM copying ctxprog\n"); +- release_firmware(fw); +- return -ENOMEM; +- } +- +- cp = pgraph->ctxprog; +- if (le32_to_cpu(cp->signature) != 0x5043564e || +- cp->version != 0 || +- le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) { +- NV_ERROR(dev, "ctxprog invalid\n"); +- release_firmware(fw); +- nouveau_grctx_fini(dev); +- return -EINVAL; +- } +- release_firmware(fw); +- } +- +- if (!pgraph->ctxvals) { +- sprintf(name, "nouveau/nv%02x.ctxvals", chipset); +- ret = request_firmware(&fw, name, &dev->pdev->dev); +- if (ret) { +- NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset); +- nouveau_grctx_fini(dev); +- return ret; +- } +- +- pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL); +- if (!pgraph->ctxvals) { +- NV_ERROR(dev, "OOM copying ctxvals\n"); +- release_firmware(fw); +- 
nouveau_grctx_fini(dev); +- return -ENOMEM; +- } +- +- cv = (void *)pgraph->ctxvals; +- if (le32_to_cpu(cv->signature) != 0x5643564e || +- cv->version != 0 || +- le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) { +- NV_ERROR(dev, "ctxvals invalid\n"); +- release_firmware(fw); +- nouveau_grctx_fini(dev); +- return -EINVAL; +- } +- release_firmware(fw); +- } +- +- cp = pgraph->ctxprog; +- +- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); +- for (i = 0; i < le16_to_cpu(cp->length); i++) +- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, +- le32_to_cpu(cp->data[i])); +- +- return 0; +-} +- +-void +-nouveau_grctx_fini(struct drm_device *dev) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; +- +- if (pgraph->ctxprog) { +- kfree(pgraph->ctxprog); +- pgraph->ctxprog = NULL; +- } +- +- if (pgraph->ctxvals) { +- kfree(pgraph->ctxprog); +- pgraph->ctxvals = NULL; +- } +-} +- +-void +-nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; +- struct nouveau_ctxvals *cv = pgraph->ctxvals; +- int i; +- +- if (!cv) +- return; +- +- for (i = 0; i < le32_to_cpu(cv->length); i++) +- nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset), +- le32_to_cpu(cv->data[i].value)); +-} +diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c +index c1fd42b..09db6f6 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_mem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_mem.c +@@ -35,162 +35,6 @@ + #include "drm_sarea.h" + #include "nouveau_drv.h" + +-static struct mem_block * +-split_block(struct mem_block *p, uint64_t start, uint64_t size, +- struct drm_file *file_priv) +-{ +- /* Maybe cut off the start of an existing block */ +- if (start > p->start) { +- struct mem_block *newblock = +- kmalloc(sizeof(*newblock), GFP_KERNEL); +- if (!newblock) +- 
goto out; +- newblock->start = start; +- newblock->size = p->size - (start - p->start); +- newblock->file_priv = NULL; +- newblock->next = p->next; +- newblock->prev = p; +- p->next->prev = newblock; +- p->next = newblock; +- p->size -= newblock->size; +- p = newblock; +- } +- +- /* Maybe cut off the end of an existing block */ +- if (size < p->size) { +- struct mem_block *newblock = +- kmalloc(sizeof(*newblock), GFP_KERNEL); +- if (!newblock) +- goto out; +- newblock->start = start + size; +- newblock->size = p->size - size; +- newblock->file_priv = NULL; +- newblock->next = p->next; +- newblock->prev = p; +- p->next->prev = newblock; +- p->next = newblock; +- p->size = size; +- } +- +-out: +- /* Our block is in the middle */ +- p->file_priv = file_priv; +- return p; +-} +- +-struct mem_block * +-nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, +- int align2, struct drm_file *file_priv, int tail) +-{ +- struct mem_block *p; +- uint64_t mask = (1 << align2) - 1; +- +- if (!heap) +- return NULL; +- +- if (tail) { +- list_for_each_prev(p, heap) { +- uint64_t start = ((p->start + p->size) - size) & ~mask; +- +- if (p->file_priv == NULL && start >= p->start && +- start + size <= p->start + p->size) +- return split_block(p, start, size, file_priv); +- } +- } else { +- list_for_each(p, heap) { +- uint64_t start = (p->start + mask) & ~mask; +- +- if (p->file_priv == NULL && +- start + size <= p->start + p->size) +- return split_block(p, start, size, file_priv); +- } +- } +- +- return NULL; +-} +- +-void nouveau_mem_free_block(struct mem_block *p) +-{ +- p->file_priv = NULL; +- +- /* Assumes a single contiguous range. Needs a special file_priv in +- * 'heap' to stop it being subsumed. 
+- */ +- if (p->next->file_priv == NULL) { +- struct mem_block *q = p->next; +- p->size += q->size; +- p->next = q->next; +- p->next->prev = p; +- kfree(q); +- } +- +- if (p->prev->file_priv == NULL) { +- struct mem_block *q = p->prev; +- q->size += p->size; +- q->next = p->next; +- q->next->prev = q; +- kfree(p); +- } +-} +- +-/* Initialize. How to check for an uninitialized heap? +- */ +-int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, +- uint64_t size) +-{ +- struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); +- +- if (!blocks) +- return -ENOMEM; +- +- *heap = kmalloc(sizeof(**heap), GFP_KERNEL); +- if (!*heap) { +- kfree(blocks); +- return -ENOMEM; +- } +- +- blocks->start = start; +- blocks->size = size; +- blocks->file_priv = NULL; +- blocks->next = blocks->prev = *heap; +- +- memset(*heap, 0, sizeof(**heap)); +- (*heap)->file_priv = (struct drm_file *) -1; +- (*heap)->next = (*heap)->prev = blocks; +- return 0; +-} +- +-/* +- * Free all blocks associated with the releasing file_priv +- */ +-void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) +-{ +- struct mem_block *p; +- +- if (!heap || !heap->next) +- return; +- +- list_for_each(p, heap) { +- if (p->file_priv == file_priv) +- p->file_priv = NULL; +- } +- +- /* Assumes a single contiguous range. Needs a special file_priv in +- * 'heap' to stop it being subsumed. 
+- */ +- list_for_each(p, heap) { +- while ((p->file_priv == NULL) && +- (p->next->file_priv == NULL) && +- (p->next != heap)) { +- struct mem_block *q = p->next; +- p->size += q->size; +- p->next = q->next; +- p->next->prev = p; +- kfree(q); +- } +- } +-} +- + /* + * NV10-NV40 tiling helpers + */ +@@ -299,7 +143,6 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, + phys |= 0x30; + } + +- dev_priv->engine.instmem.prepare_access(dev, true); + while (size) { + unsigned offset_h = upper_32_bits(phys); + unsigned offset_l = lower_32_bits(phys); +@@ -331,36 +174,12 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, + } + } + } +- dev_priv->engine.instmem.finish_access(dev); +- +- nv_wr32(dev, 0x100c80, 0x00050001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } +- +- nv_wr32(dev, 0x100c80, 0x00000001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } +- +- nv_wr32(dev, 0x100c80, 0x00040001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } +- +- nv_wr32(dev, 0x100c80, 0x00060001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } ++ dev_priv->engine.instmem.flush(dev); + ++ nv50_vm_flush(dev, 5); ++ nv50_vm_flush(dev, 0); ++ nv50_vm_flush(dev, 4); ++ nv50_vm_flush(dev, 6); + return 0; + } + +@@ -374,7 +193,6 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) + virt -= dev_priv->vm_vram_base; + pages = 
(size >> 16) << 1; + +- dev_priv->engine.instmem.prepare_access(dev, true); + while (pages) { + pgt = dev_priv->vm_vram_pt[virt >> 29]; + pte = (virt & 0x1ffe0000ULL) >> 15; +@@ -388,57 +206,19 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) + while (pte < end) + nv_wo32(dev, pgt, pte++, 0); + } +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + +- nv_wr32(dev, 0x100c80, 0x00050001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return; +- } +- +- nv_wr32(dev, 0x100c80, 0x00000001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return; +- } +- +- nv_wr32(dev, 0x100c80, 0x00040001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return; +- } +- +- nv_wr32(dev, 0x100c80, 0x00060001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- } ++ nv50_vm_flush(dev, 5); ++ nv50_vm_flush(dev, 0); ++ nv50_vm_flush(dev, 4); ++ nv50_vm_flush(dev, 6); + } + + /* + * Cleanup everything + */ +-void nouveau_mem_takedown(struct mem_block **heap) +-{ +- struct mem_block *p; +- +- if (!*heap) +- return; +- +- for (p = (*heap)->next; p != *heap;) { +- struct mem_block *q = p; +- p = p->next; +- kfree(q); +- } +- +- kfree(*heap); +- *heap = NULL; +-} +- +-void nouveau_mem_close(struct drm_device *dev) ++void ++nouveau_mem_close(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + +@@ -449,8 +229,7 @@ void nouveau_mem_close(struct drm_device *dev) + + 
nouveau_ttm_global_release(dev_priv); + +- if (drm_core_has_AGP(dev) && dev->agp && +- drm_core_check_feature(dev, DRIVER_MODESET)) { ++ if (drm_core_has_AGP(dev) && dev->agp) { + struct drm_agp_mem *entry, *tempe; + + /* Remove AGP resources, but leave dev->agp +@@ -470,10 +249,10 @@ void nouveau_mem_close(struct drm_device *dev) + dev->agp->enabled = 0; + } + +- if (dev_priv->fb_mtrr) { ++ if (dev_priv->fb_mtrr >= 0) { + drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1), + drm_get_resource_len(dev, 1), DRM_MTRR_WC); +- dev_priv->fb_mtrr = 0; ++ dev_priv->fb_mtrr = -1; + } + } + +@@ -536,12 +315,18 @@ nouveau_mem_detect(struct drm_device *dev) + } else + if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { + dev_priv->vram_size = nouveau_mem_detect_nforce(dev); +- } else { ++ } else ++ if (dev_priv->card_type < NV_50) { + dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); + dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; +- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) ++ } else { ++ dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); ++ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; ++ dev_priv->vram_size &= 0xffffffff00ll; ++ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) { + dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); + dev_priv->vram_sys_base <<= 12; ++ } + } + + NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); +diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c +index 9537f3e..3ec181f 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c ++++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c +@@ -55,7 +55,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) + if (ret) + goto out_err; + +- ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size); ++ ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); + if (ret) + goto out_err; + +@@ -80,7 +80,7 @@ nouveau_notifier_takedown_channel(struct 
nouveau_channel *chan) + nouveau_bo_unpin(chan->notifier_bo); + mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); +- nouveau_mem_takedown(&chan->notifier_heap); ++ drm_mm_takedown(&chan->notifier_heap); + } + + static void +@@ -90,7 +90,7 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev, + NV_DEBUG(dev, "\n"); + + if (gpuobj->priv) +- nouveau_mem_free_block(gpuobj->priv); ++ drm_mm_put_block(gpuobj->priv); + } + + int +@@ -100,18 +100,13 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *nobj = NULL; +- struct mem_block *mem; ++ struct drm_mm_node *mem; + uint32_t offset; + int target, ret; + +- if (!chan->notifier_heap) { +- NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n", +- chan->id); +- return -EINVAL; +- } +- +- mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0, +- (struct drm_file *)-2, 0); ++ mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); ++ if (mem) ++ mem = drm_mm_get_block(mem, size, 0); + if (!mem) { + NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); + return -ENOMEM; +@@ -144,17 +139,17 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, + mem->size, NV_DMA_ACCESS_RW, target, + &nobj); + if (ret) { +- nouveau_mem_free_block(mem); ++ drm_mm_put_block(mem); + NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret); + return ret; + } +- nobj->dtor = nouveau_notifier_gpuobj_dtor; +- nobj->priv = mem; ++ nobj->dtor = nouveau_notifier_gpuobj_dtor; ++ nobj->priv = mem; + + ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); + if (ret) { + nouveau_gpuobj_del(dev, &nobj); +- nouveau_mem_free_block(mem); ++ drm_mm_put_block(mem); + NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); + return ret; + } +@@ -170,7 +165,7 @@ nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t 
*poffset) + return -EINVAL; + + if (poffset) { +- struct mem_block *mem = nobj->priv; ++ struct drm_mm_node *mem = nobj->priv; + + if (*poffset >= mem->size) + return false; +@@ -189,7 +184,6 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, + struct nouveau_channel *chan; + int ret; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); + + ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); +diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c +index e7c100b..4bf6b33 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_object.c ++++ b/drivers/gpu/drm/nouveau/nouveau_object.c +@@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) + } + } + +- instmem->prepare_access(dev, true); + co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); + do { + if (!nouveau_ramht_entry_valid(dev, ramht, co)) { +@@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) + nv_wo32(dev, ramht, (co + 4)/4, ctx); + + list_add_tail(&ref->list, &chan->ramht_refs); +- instmem->finish_access(dev); ++ instmem->flush(dev); + return 0; + } + NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", +@@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) + if (co >= dev_priv->ramht_size) + co = 0; + } while (co != ho); +- instmem->finish_access(dev); + + NV_ERROR(dev, "RAMHT space exhausted. 
ch=%d\n", chan->id); + return -ENOMEM; +@@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) + return; + } + +- instmem->prepare_access(dev, true); + co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); + do { + if (nouveau_ramht_entry_valid(dev, ramht, co) && +@@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) + nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); + + list_del(&ref->list); +- instmem->finish_access(dev); ++ instmem->flush(dev); + return; + } + +@@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) + co = 0; + } while (co != ho); + list_del(&ref->list); +- instmem->finish_access(dev); + + NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", + chan->id, ref->handle); +@@ -209,7 +205,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_gpuobj *gpuobj; +- struct mem_block *pramin = NULL; ++ struct drm_mm *pramin = NULL; + int ret; + + NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", +@@ -233,25 +229,12 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, + * available. 
+ */ + if (chan) { +- if (chan->ramin_heap) { +- NV_DEBUG(dev, "private heap\n"); +- pramin = chan->ramin_heap; +- } else +- if (dev_priv->card_type < NV_50) { +- NV_DEBUG(dev, "global heap fallback\n"); +- pramin = dev_priv->ramin_heap; +- } ++ NV_DEBUG(dev, "channel heap\n"); ++ pramin = &chan->ramin_heap; + } else { + NV_DEBUG(dev, "global heap\n"); +- pramin = dev_priv->ramin_heap; +- } +- +- if (!pramin) { +- NV_ERROR(dev, "No PRAMIN heap!\n"); +- return -EINVAL; +- } ++ pramin = &dev_priv->ramin_heap; + +- if (!chan) { + ret = engine->instmem.populate(dev, gpuobj, &size); + if (ret) { + nouveau_gpuobj_del(dev, &gpuobj); +@@ -260,9 +243,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, + } + + /* Allocate a chunk of the PRAMIN aperture */ +- gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, +- drm_order(align), +- (struct drm_file *)-2, 0); ++ gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0); ++ if (gpuobj->im_pramin) ++ gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); ++ + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); + return -ENOMEM; +@@ -279,10 +263,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { + int i; + +- engine->instmem.prepare_access(dev, true); + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + nv_wo32(dev, gpuobj, i/4, 0); +- engine->instmem.finish_access(dev); ++ engine->instmem.flush(dev); + } + + *gpuobj_ret = gpuobj; +@@ -370,10 +353,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) + } + + if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { +- engine->instmem.prepare_access(dev, true); + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + nv_wo32(dev, gpuobj, i/4, 0); +- engine->instmem.finish_access(dev); ++ engine->instmem.flush(dev); + } + + if (gpuobj->dtor) +@@ -386,7 +368,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct 
nouveau_gpuobj **pgpuobj) + if (gpuobj->flags & NVOBJ_FLAG_FAKE) + kfree(gpuobj->im_pramin); + else +- nouveau_mem_free_block(gpuobj->im_pramin); ++ drm_mm_put_block(gpuobj->im_pramin); + } + + list_del(&gpuobj->list); +@@ -589,7 +571,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + + if (p_offset != ~0) { +- gpuobj->im_pramin = kzalloc(sizeof(struct mem_block), ++ gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), + GFP_KERNEL); + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); +@@ -605,10 +587,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, + } + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { +- dev_priv->engine.instmem.prepare_access(dev, true); + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + nv_wo32(dev, gpuobj, i/4, 0); +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + } + + if (pref) { +@@ -696,8 +677,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, + return ret; + } + +- instmem->prepare_access(dev, true); +- + if (dev_priv->card_type < NV_50) { + uint32_t frame, adjust, pte_flags = 0; + +@@ -734,7 +713,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, + nv_wo32(dev, *gpuobj, 5, flags5); + } + +- instmem->finish_access(dev); ++ instmem->flush(dev); + + (*gpuobj)->engine = NVOBJ_ENGINE_SW; + (*gpuobj)->class = class; +@@ -849,7 +828,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, + return ret; + } + +- dev_priv->engine.instmem.prepare_access(dev, true); + if (dev_priv->card_type >= NV_50) { + nv_wo32(dev, *gpuobj, 0, class); + nv_wo32(dev, *gpuobj, 5, 0x00010000); +@@ -874,7 +852,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, + } + } + } +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + + (*gpuobj)->engine = NVOBJ_ENGINE_GR; + (*gpuobj)->class = class; +@@ -920,6 +898,7 @@ 
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) + base = 0; + + /* PGRAPH context */ ++ size += dev_priv->engine.graph.grctx_size; + + if (dev_priv->card_type == NV_50) { + /* Various fixed table thingos */ +@@ -930,12 +909,8 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) + size += 0x8000; + /* RAMFC */ + size += 0x1000; +- /* PGRAPH context */ +- size += 0x70000; + } + +- NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", +- chan->id, size, base); + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, + &chan->ramin); + if (ret) { +@@ -944,8 +919,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) + } + pramin = chan->ramin->gpuobj; + +- ret = nouveau_mem_init_heap(&chan->ramin_heap, +- pramin->im_pramin->start + base, size); ++ ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size); + if (ret) { + NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); + nouveau_gpuobj_ref_del(dev, &chan->ramin); +@@ -969,15 +943,11 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, + + NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); + +- /* Reserve a block of PRAMIN for the channel +- *XXX: maybe on card_type == NV_50) { +- ret = nouveau_gpuobj_channel_init_pramin(chan); +- if (ret) { +- NV_ERROR(dev, "init pramin\n"); +- return ret; +- } ++ /* Allocate a chunk of memory for per-channel object storage */ ++ ret = nouveau_gpuobj_channel_init_pramin(chan); ++ if (ret) { ++ NV_ERROR(dev, "init pramin\n"); ++ return ret; + } + + /* NV50 VM +@@ -988,17 +958,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, + if (dev_priv->card_type >= NV_50) { + uint32_t vm_offset, pde; + +- instmem->prepare_access(dev, true); +- + vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 
0x1400 : 0x200; + vm_offset += chan->ramin->gpuobj->im_pramin->start; + + ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, + 0, &chan->vm_pd, NULL); +- if (ret) { +- instmem->finish_access(dev); ++ if (ret) + return ret; +- } + for (i = 0; i < 0x4000; i += 8) { + nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); + nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); +@@ -1008,10 +974,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, + ret = nouveau_gpuobj_ref_add(dev, NULL, 0, + dev_priv->gart_info.sg_ctxdma, + &chan->vm_gart_pt); +- if (ret) { +- instmem->finish_access(dev); ++ if (ret) + return ret; +- } + nv_wo32(dev, chan->vm_pd, pde++, + chan->vm_gart_pt->instance | 0x03); + nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); +@@ -1021,17 +985,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, + ret = nouveau_gpuobj_ref_add(dev, NULL, 0, + dev_priv->vm_vram_pt[i], + &chan->vm_vram_pt[i]); +- if (ret) { +- instmem->finish_access(dev); ++ if (ret) + return ret; +- } + + nv_wo32(dev, chan->vm_pd, pde++, + chan->vm_vram_pt[i]->instance | 0x61); + nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); + } + +- instmem->finish_access(dev); ++ instmem->flush(dev); + } + + /* RAMHT */ +@@ -1130,8 +1092,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) + for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) + nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); + +- if (chan->ramin_heap) +- nouveau_mem_takedown(&chan->ramin_heap); ++ if (chan->ramin_heap.fl_entry.next) ++ drm_mm_takedown(&chan->ramin_heap); + if (chan->ramin) + nouveau_gpuobj_ref_del(dev, &chan->ramin); + +@@ -1164,10 +1126,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev) + return -ENOMEM; + } + +- dev_priv->engine.instmem.prepare_access(dev, false); + for (i = 0; i < gpuobj->im_pramin->size / 4; i++) + gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); +- dev_priv->engine.instmem.finish_access(dev); + } + + return 0; +@@ -1212,10 +1172,9 @@ nouveau_gpuobj_resume(struct 
drm_device *dev) + if (!gpuobj->im_backing_suspend) + continue; + +- dev_priv->engine.instmem.prepare_access(dev, true); + for (i = 0; i < gpuobj->im_pramin->size / 4; i++) + nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + } + + nouveau_gpuobj_suspend_cleanup(dev); +@@ -1232,7 +1191,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, + struct nouveau_channel *chan; + int ret; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); + + if (init->handle == ~0) +@@ -1283,7 +1241,6 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, + struct nouveau_channel *chan; + int ret; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); + + ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); +diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h +index 6ca80a3..b6391a1 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_reg.h ++++ b/drivers/gpu/drm/nouveau/nouveau_reg.h +@@ -814,6 +814,7 @@ + #define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 + #define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff + #define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) ++#define NV50_SOR_DP_CTRL_ENABLED 0x00000001 + #define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 + #define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000 + #define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000 +diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c +index 1d6ee8b..491767f 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c ++++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c +@@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) + + NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start); + +- dev_priv->engine.instmem.prepare_access(nvbe->dev, 
true); + pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT); + nvbe->pte_start = pte; + for (i = 0; i < nvbe->nr_pages; i++) { +@@ -116,24 +115,11 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) + dma_offset += NV_CTXDMA_PAGE_SIZE; + } + } +- dev_priv->engine.instmem.finish_access(nvbe->dev); ++ dev_priv->engine.instmem.flush(nvbe->dev); + + if (dev_priv->card_type == NV_50) { +- nv_wr32(dev, 0x100c80, 0x00050001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", +- nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } +- +- nv_wr32(dev, 0x100c80, 0x00000001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", +- nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } ++ nv50_vm_flush(dev, 5); /* PGRAPH */ ++ nv50_vm_flush(dev, 0); /* PFIFO */ + } + + nvbe->bound = true; +@@ -154,7 +140,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be) + if (!nvbe->bound) + return 0; + +- dev_priv->engine.instmem.prepare_access(nvbe->dev, true); + pte = nvbe->pte_start; + for (i = 0; i < nvbe->nr_pages; i++) { + dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; +@@ -170,24 +155,11 @@ nouveau_sgdma_unbind(struct ttm_backend *be) + dma_offset += NV_CTXDMA_PAGE_SIZE; + } + } +- dev_priv->engine.instmem.finish_access(nvbe->dev); ++ dev_priv->engine.instmem.flush(nvbe->dev); + + if (dev_priv->card_type == NV_50) { +- nv_wr32(dev, 0x100c80, 0x00050001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", +- nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } +- +- nv_wr32(dev, 0x100c80, 0x00000001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", +- nv_rd32(dev, 
0x100c80)); +- return -EBUSY; +- } ++ nv50_vm_flush(dev, 5); ++ nv50_vm_flush(dev, 0); + } + + nvbe->bound = false; +@@ -272,7 +244,6 @@ nouveau_sgdma_init(struct drm_device *dev) + pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + +- dev_priv->engine.instmem.prepare_access(dev, true); + if (dev_priv->card_type < NV_50) { + /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and + * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE +@@ -294,7 +265,7 @@ nouveau_sgdma_init(struct drm_device *dev) + nv_wo32(dev, gpuobj, (i+4)/4, 0); + } + } +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + + dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; + dev_priv->gart_info.aper_base = 0; +@@ -325,14 +296,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; +- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + int pte; + + pte = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) { +- instmem->prepare_access(dev, false); + *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; +- instmem->finish_access(dev); + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +index 4c26be6..63c2d24 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_state.c ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -54,8 +54,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; +- engine->instmem.prepare_access = nv04_instmem_prepare_access; +- engine->instmem.finish_access = nv04_instmem_finish_access; ++ engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = nv04_mc_init; + 
engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; +@@ -95,8 +94,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; +- engine->instmem.prepare_access = nv04_instmem_prepare_access; +- engine->instmem.finish_access = nv04_instmem_finish_access; ++ engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; +@@ -138,8 +136,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; +- engine->instmem.prepare_access = nv04_instmem_prepare_access; +- engine->instmem.finish_access = nv04_instmem_finish_access; ++ engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; +@@ -181,8 +178,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; +- engine->instmem.prepare_access = nv04_instmem_prepare_access; +- engine->instmem.finish_access = nv04_instmem_finish_access; ++ engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; +@@ -225,8 +221,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; +- engine->instmem.prepare_access = nv04_instmem_prepare_access; +- engine->instmem.finish_access = nv04_instmem_finish_access; ++ engine->instmem.flush = 
nv04_instmem_flush; + engine->mc.init = nv40_mc_init; + engine->mc.takedown = nv40_mc_takedown; + engine->timer.init = nv04_timer_init; +@@ -271,8 +266,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) + engine->instmem.clear = nv50_instmem_clear; + engine->instmem.bind = nv50_instmem_bind; + engine->instmem.unbind = nv50_instmem_unbind; +- engine->instmem.prepare_access = nv50_instmem_prepare_access; +- engine->instmem.finish_access = nv50_instmem_finish_access; ++ engine->instmem.flush = nv50_instmem_flush; + engine->mc.init = nv50_mc_init; + engine->mc.takedown = nv50_mc_takedown; + engine->timer.init = nv04_timer_init; +@@ -404,11 +398,6 @@ nouveau_card_init(struct drm_device *dev) + struct nouveau_engine *engine; + int ret; + +- NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); +- +- if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) +- return 0; +- + vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); + vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, + nouveau_switcheroo_can_switch); +@@ -418,15 +407,12 @@ nouveau_card_init(struct drm_device *dev) + if (ret) + goto out; + engine = &dev_priv->engine; +- dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; + spin_lock_init(&dev_priv->context_switch_lock); + + /* Parse BIOS tables / Run init tables if card not POSTed */ +- if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- ret = nouveau_bios_init(dev); +- if (ret) +- goto out; +- } ++ ret = nouveau_bios_init(dev); ++ if (ret) ++ goto out; + + ret = nouveau_mem_detect(dev); + if (ret) +@@ -482,12 +468,19 @@ nouveau_card_init(struct drm_device *dev) + goto out_graph; + } + ++ if (dev_priv->card_type >= NV_50) ++ ret = nv50_display_create(dev); ++ else ++ ret = nv04_display_create(dev); ++ if (ret) ++ goto out_fifo; ++ + /* this call irq_preinstall, register irq handler and + * call irq_postinstall + */ + ret = drm_irq_install(dev); + if (ret) +- goto out_fifo; ++ goto out_display; + + ret = 
drm_vblank_init(dev, 0); + if (ret) +@@ -501,33 +494,20 @@ nouveau_card_init(struct drm_device *dev) + goto out_irq; + } + +- if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- if (dev_priv->card_type >= NV_50) +- ret = nv50_display_create(dev); +- else +- ret = nv04_display_create(dev); +- if (ret) +- goto out_channel; +- } +- + ret = nouveau_backlight_init(dev); + if (ret) + NV_ERROR(dev, "Error %d registering backlight\n", ret); + +- dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; +- +- if (drm_core_check_feature(dev, DRIVER_MODESET)) +- nouveau_fbcon_init(dev); +- ++ nouveau_fbcon_init(dev); + return 0; + +-out_channel: +- if (dev_priv->channel) { +- nouveau_channel_free(dev_priv->channel); +- dev_priv->channel = NULL; +- } + out_irq: + drm_irq_uninstall(dev); ++out_display: ++ if (dev_priv->card_type >= NV_50) ++ nv50_display_destroy(dev); ++ else ++ nv04_display_destroy(dev); + out_fifo: + if (!nouveau_noaccel) + engine->fifo.takedown(dev); +@@ -561,45 +541,37 @@ static void nouveau_card_takedown(struct drm_device *dev) + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + +- NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); +- +- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { +- +- nouveau_backlight_exit(dev); +- +- if (dev_priv->channel) { +- nouveau_channel_free(dev_priv->channel); +- dev_priv->channel = NULL; +- } ++ nouveau_backlight_exit(dev); + +- if (!nouveau_noaccel) { +- engine->fifo.takedown(dev); +- engine->graph.takedown(dev); +- } +- engine->fb.takedown(dev); +- engine->timer.takedown(dev); +- engine->mc.takedown(dev); ++ if (dev_priv->channel) { ++ nouveau_channel_free(dev_priv->channel); ++ dev_priv->channel = NULL; ++ } + +- mutex_lock(&dev->struct_mutex); +- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); +- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); +- mutex_unlock(&dev->struct_mutex); +- nouveau_sgdma_takedown(dev); ++ if (!nouveau_noaccel) { ++ 
engine->fifo.takedown(dev); ++ engine->graph.takedown(dev); ++ } ++ engine->fb.takedown(dev); ++ engine->timer.takedown(dev); ++ engine->mc.takedown(dev); + +- nouveau_gpuobj_takedown(dev); +- nouveau_mem_close(dev); +- engine->instmem.takedown(dev); ++ mutex_lock(&dev->struct_mutex); ++ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); ++ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); ++ mutex_unlock(&dev->struct_mutex); ++ nouveau_sgdma_takedown(dev); + +- if (drm_core_check_feature(dev, DRIVER_MODESET)) +- drm_irq_uninstall(dev); ++ nouveau_gpuobj_takedown(dev); ++ nouveau_mem_close(dev); ++ engine->instmem.takedown(dev); + +- nouveau_gpuobj_late_takedown(dev); +- nouveau_bios_takedown(dev); ++ drm_irq_uninstall(dev); + +- vga_client_register(dev->pdev, NULL, NULL, NULL); ++ nouveau_gpuobj_late_takedown(dev); ++ nouveau_bios_takedown(dev); + +- dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; +- } ++ vga_client_register(dev->pdev, NULL, NULL, NULL); + } + + /* here a client dies, release the stuff that was allocated for its +@@ -686,6 +658,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) + struct drm_nouveau_private *dev_priv; + uint32_t reg0; + resource_size_t mmio_start_offs; ++ int ret; + + dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); + if (!dev_priv) +@@ -694,7 +667,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) + dev_priv->dev = dev; + + dev_priv->flags = flags & NOUVEAU_FLAGS; +- dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; + + NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", + dev->pci_vendor, dev->pci_device, dev->pdev->class); +@@ -768,11 +740,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) + NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", + dev_priv->card_type, reg0); + +- if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- int ret = nouveau_remove_conflicting_drivers(dev); +- if (ret) +- return ret; +- } ++ ret = nouveau_remove_conflicting_drivers(dev); ++ if 
(ret) ++ return ret; + + /* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */ + if (dev_priv->card_type >= NV_40) { +@@ -807,45 +777,27 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) + dev_priv->flags |= NV_NFORCE2; + + /* For kernel modesetting, init card now and bring up fbcon */ +- if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- int ret = nouveau_card_init(dev); +- if (ret) +- return ret; +- } ++ ret = nouveau_card_init(dev); ++ if (ret) ++ return ret; + + return 0; + } + +-static void nouveau_close(struct drm_device *dev) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- +- /* In the case of an error dev_priv may not be allocated yet */ +- if (dev_priv) +- nouveau_card_takedown(dev); +-} +- +-/* KMS: we need mmio at load time, not when the first drm client opens. */ + void nouveau_lastclose(struct drm_device *dev) + { +- if (drm_core_check_feature(dev, DRIVER_MODESET)) +- return; +- +- nouveau_close(dev); + } + + int nouveau_unload(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + +- if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- nouveau_fbcon_fini(dev); +- if (dev_priv->card_type >= NV_50) +- nv50_display_destroy(dev); +- else +- nv04_display_destroy(dev); +- nouveau_close(dev); +- } ++ nouveau_fbcon_fini(dev); ++ if (dev_priv->card_type >= NV_50) ++ nv50_display_destroy(dev); ++ else ++ nv04_display_destroy(dev); ++ nouveau_card_takedown(dev); + + iounmap(dev_priv->mmio); + iounmap(dev_priv->ramin); +@@ -861,8 +813,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_getparam *getparam = data; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- + switch (getparam->param) { + case NOUVEAU_GETPARAM_CHIPSET_ID: + getparam->value = dev_priv->chipset; +@@ -931,8 +881,6 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, + { + struct drm_nouveau_setparam *setparam = 
data; + +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- + switch (setparam->param) { + default: + NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); +diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c +index 1cb19e3..2d0fee5 100644 +--- a/drivers/gpu/drm/nouveau/nv04_dac.c ++++ b/drivers/gpu/drm/nouveau/nv04_dac.c +@@ -261,12 +261,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) + + saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); + head = (saved_routput & 0x100) >> 8; +-#if 0 +- /* if there's a spare crtc, using it will minimise flicker for the case +- * where the in-use crtc is in use by an off-chip tmds encoder */ +- if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled) ++ ++ /* if there's a spare crtc, using it will minimise flicker */ ++ if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0)) + head ^= 1; +-#endif ++ + /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ + routput = (saved_routput & 0xfffffece) | head << 8; + +@@ -315,9 +314,12 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) + { + struct drm_device *dev = encoder->dev; + struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; +- uint32_t sample = nv17_dac_sample_load(encoder); + +- if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { ++ if (nv04_dac_in_use(encoder)) ++ return connector_status_disconnected; ++ ++ if (nv17_dac_sample_load(encoder) & ++ NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { + NV_INFO(dev, "Load detected on output %c\n", + '@' + ffs(dcb->or)); + return connector_status_connected; +@@ -330,6 +332,9 @@ static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) + { ++ if (nv04_dac_in_use(encoder)) ++ return false; ++ + return true; + } + +@@ -428,6 +433,17 @@ void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) + } + } + ++/* Check if 
the DAC corresponding to 'encoder' is being used by ++ * someone else. */ ++bool nv04_dac_in_use(struct drm_encoder *encoder) ++{ ++ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; ++ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; ++ ++ return nv_gf4_disp_arch(encoder->dev) && ++ (dev_priv->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index)); ++} ++ + static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) + { + struct drm_device *dev = encoder->dev; +@@ -501,11 +517,13 @@ static const struct drm_encoder_funcs nv04_dac_funcs = { + .destroy = nv04_dac_destroy, + }; + +-int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) ++int ++nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry) + { + const struct drm_encoder_helper_funcs *helper; +- struct drm_encoder *encoder; + struct nouveau_encoder *nv_encoder = NULL; ++ struct drm_device *dev = connector->dev; ++ struct drm_encoder *encoder; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) +@@ -527,5 +545,6 @@ int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c +index 41634d4..3311f3a 100644 +--- a/drivers/gpu/drm/nouveau/nv04_dfp.c ++++ b/drivers/gpu/drm/nouveau/nv04_dfp.c +@@ -413,10 +413,6 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) + struct dcb_entry *dcbe = nv_encoder->dcb; + int head = nouveau_crtc(encoder->crtc)->index; + +- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", +- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), +- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +- + if (dcbe->type == OUTPUT_TMDS) + run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); + else if (dcbe->type == OUTPUT_LVDS) +@@ 
-584,11 +580,12 @@ static const struct drm_encoder_funcs nv04_dfp_funcs = { + .destroy = nv04_dfp_destroy, + }; + +-int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) ++int ++nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry) + { + const struct drm_encoder_helper_funcs *helper; +- struct drm_encoder *encoder; + struct nouveau_encoder *nv_encoder = NULL; ++ struct drm_encoder *encoder; + int type; + + switch (entry->type) { +@@ -613,11 +610,12 @@ int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + +- drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type); ++ drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type); + drm_encoder_helper_add(encoder, helper); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c +index c7898b4..b35b7ed 100644 +--- a/drivers/gpu/drm/nouveau/nv04_display.c ++++ b/drivers/gpu/drm/nouveau/nv04_display.c +@@ -94,6 +94,7 @@ nv04_display_create(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct dcb_table *dcb = &dev_priv->vbios.dcb; ++ struct drm_connector *connector, *ct; + struct drm_encoder *encoder; + struct drm_crtc *crtc; + int i, ret; +@@ -132,19 +133,23 @@ nv04_display_create(struct drm_device *dev) + for (i = 0; i < dcb->entries; i++) { + struct dcb_entry *dcbent = &dcb->entry[i]; + ++ connector = nouveau_connector_create(dev, dcbent->connector); ++ if (IS_ERR(connector)) ++ continue; ++ + switch (dcbent->type) { + case OUTPUT_ANALOG: +- ret = nv04_dac_create(dev, dcbent); ++ ret = nv04_dac_create(connector, dcbent); + break; + case OUTPUT_LVDS: + case OUTPUT_TMDS: +- ret = nv04_dfp_create(dev, dcbent); ++ ret = nv04_dfp_create(connector, dcbent); + break; + case 
OUTPUT_TV: + if (dcbent->location == DCB_LOC_ON_CHIP) +- ret = nv17_tv_create(dev, dcbent); ++ ret = nv17_tv_create(connector, dcbent); + else +- ret = nv04_tv_create(dev, dcbent); ++ ret = nv04_tv_create(connector, dcbent); + break; + default: + NV_WARN(dev, "DCB type %d not known\n", dcbent->type); +@@ -155,8 +160,14 @@ nv04_display_create(struct drm_device *dev) + continue; + } + +- for (i = 0; i < dcb->connector.entries; i++) +- nouveau_connector_create(dev, &dcb->connector.entry[i]); ++ list_for_each_entry_safe(connector, ct, ++ &dev->mode_config.connector_list, head) { ++ if (!connector->encoder_ids[0]) { ++ NV_WARN(dev, "%s has no encoders, removing\n", ++ drm_get_connector_name(connector)); ++ connector->funcs->destroy(connector); ++ } ++ } + + /* Save previous state */ + NVLockVgaCrtcs(dev, false); +diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c +index 66fe559..06cedd9 100644 +--- a/drivers/gpu/drm/nouveau/nv04_fifo.c ++++ b/drivers/gpu/drm/nouveau/nv04_fifo.c +@@ -112,6 +112,12 @@ nv04_fifo_channel_id(struct drm_device *dev) + NV03_PFIFO_CACHE1_PUSH1_CHID_MASK; + } + ++#ifdef __BIG_ENDIAN ++#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN ++#else ++#define DMA_FETCH_ENDIANNESS 0 ++#endif ++ + int + nv04_fifo_create_context(struct nouveau_channel *chan) + { +@@ -131,18 +137,13 @@ nv04_fifo_create_context(struct nouveau_channel *chan) + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + + /* Setup initial state */ +- dev_priv->engine.instmem.prepare_access(dev, true); + RAMFC_WR(DMA_PUT, chan->pushbuf_base); + RAMFC_WR(DMA_GET, chan->pushbuf_base); + RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); + RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +-#ifdef __BIG_ENDIAN +- NV_PFIFO_CACHE1_BIG_ENDIAN | +-#endif +- 0)); +- dev_priv->engine.instmem.finish_access(dev); ++ DMA_FETCH_ENDIANNESS)); + + /* 
enable the fifo dma operation */ + nv_wr32(dev, NV04_PFIFO_MODE, +@@ -169,8 +170,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid) + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fc = NV04_RAMFC(chid), tmp; + +- dev_priv->engine.instmem.prepare_access(dev, false); +- + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); + tmp = nv_ri32(dev, fc + 8); +@@ -181,8 +180,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid) + nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20)); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24)); + +- dev_priv->engine.instmem.finish_access(dev); +- + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); + } +@@ -223,7 +220,6 @@ nv04_fifo_unload_context(struct drm_device *dev) + return -EINVAL; + } + +- dev_priv->engine.instmem.prepare_access(dev, true); + RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); + RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; +@@ -233,7 +229,6 @@ nv04_fifo_unload_context(struct drm_device *dev) + RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH)); + RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); + RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); +- dev_priv->engine.instmem.finish_access(dev); + + nv04_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); +@@ -297,6 +292,7 @@ nv04_fifo_init(struct drm_device *dev) + + nv04_fifo_init_intr(dev); + pfifo->enable(dev); ++ pfifo->reassign(dev, true); + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + if (dev_priv->fifos[i]) { +diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c +index 618355e..c897342 100644 +--- a/drivers/gpu/drm/nouveau/nv04_graph.c ++++ 
b/drivers/gpu/drm/nouveau/nv04_graph.c +@@ -342,7 +342,7 @@ static uint32_t nv04_graph_ctx_regs[] = { + }; + + struct graph_state { +- int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; ++ uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; + }; + + struct nouveau_channel * +@@ -527,8 +527,7 @@ static int + nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) + { +- chan->fence.last_sequence_irq = data; +- nouveau_fence_handler(chan->dev, chan->id); ++ atomic_set(&chan->fence.last_sequence_irq, data); + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c +index a3b9563..4408232 100644 +--- a/drivers/gpu/drm/nouveau/nv04_instmem.c ++++ b/drivers/gpu/drm/nouveau/nv04_instmem.c +@@ -49,10 +49,8 @@ nv04_instmem_determine_amount(struct drm_device *dev) + NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10); + + /* Clear all of it, except the BIOS image that's in the first 64KiB */ +- dev_priv->engine.instmem.prepare_access(dev, true); + for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4) + nv_wi32(dev, i, 0x00000000); +- dev_priv->engine.instmem.finish_access(dev); + } + + static void +@@ -106,7 +104,7 @@ int nv04_instmem_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t offset; +- int ret = 0; ++ int ret; + + nv04_instmem_determine_amount(dev); + nv04_instmem_configure_fixed_tables(dev); +@@ -129,14 +127,14 @@ int nv04_instmem_init(struct drm_device *dev) + offset = 0x40000; + } + +- ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, +- offset, dev_priv->ramin_rsvd_vram - offset); ++ ret = drm_mm_init(&dev_priv->ramin_heap, offset, ++ dev_priv->ramin_rsvd_vram - offset); + if (ret) { +- dev_priv->ramin_heap = NULL; +- NV_ERROR(dev, "Failed to init RAMIN heap\n"); ++ NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret); ++ return ret; + } + +- return ret; ++ return 0; + } + + void +@@ -186,12 +184,7 @@ 
nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) + } + + void +-nv04_instmem_prepare_access(struct drm_device *dev, bool write) +-{ +-} +- +-void +-nv04_instmem_finish_access(struct drm_device *dev) ++nv04_instmem_flush(struct drm_device *dev) + { + } + +diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c +index 617ed1e..2af43a1 100644 +--- a/drivers/gpu/drm/nouveau/nv04_mc.c ++++ b/drivers/gpu/drm/nouveau/nv04_mc.c +@@ -11,6 +11,10 @@ nv04_mc_init(struct drm_device *dev) + */ + + nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ /* Disable PROM access. */ ++ nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c +index c4e3404..84b5954 100644 +--- a/drivers/gpu/drm/nouveau/nv04_tv.c ++++ b/drivers/gpu/drm/nouveau/nv04_tv.c +@@ -223,10 +223,12 @@ static void nv04_tv_destroy(struct drm_encoder *encoder) + kfree(nv_encoder); + } + +-int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) ++int ++nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry) + { + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; ++ struct drm_device *dev = connector->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct i2c_adapter *adap; + struct drm_encoder_funcs *funcs = NULL; +@@ -266,7 +268,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) + + was_locked = NVLockVgaCrtcs(dev, false); + +- ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap, ++ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), adap, + &nv04_tv_encoder_info[type].board_info); + + NVLockVgaCrtcs(dev, was_locked); +@@ -294,7 +296,9 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) + + /* Set the slave encoder configuration */ + sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params); ++ 
sfuncs->create_resources(encoder, connector); + ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + + fail: +diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c +index 7aeabf2..7a4069c 100644 +--- a/drivers/gpu/drm/nouveau/nv10_fifo.c ++++ b/drivers/gpu/drm/nouveau/nv10_fifo.c +@@ -55,7 +55,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ +- dev_priv->engine.instmem.prepare_access(dev, true); + nv_wi32(dev, fc + 0, chan->pushbuf_base); + nv_wi32(dev, fc + 4, chan->pushbuf_base); + nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); +@@ -66,7 +65,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) + NV_PFIFO_CACHE1_BIG_ENDIAN | + #endif + 0); +- dev_priv->engine.instmem.finish_access(dev); + + /* enable the fifo dma operation */ + nv_wr32(dev, NV04_PFIFO_MODE, +@@ -91,8 +89,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid) + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fc = NV10_RAMFC(chid), tmp; + +- dev_priv->engine.instmem.prepare_access(dev, false); +- + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); + nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); +@@ -117,8 +113,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid) + nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48)); + + out: +- dev_priv->engine.instmem.finish_access(dev); +- + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); + } +@@ -155,8 +149,6 @@ nv10_fifo_unload_context(struct drm_device *dev) + return 0; + fc = NV10_RAMFC(chid); + +- dev_priv->engine.instmem.prepare_access(dev, true); +- + nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); + nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + 
nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); +@@ -179,8 +171,6 @@ nv10_fifo_unload_context(struct drm_device *dev) + nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + + out: +- dev_priv->engine.instmem.finish_access(dev); +- + nv10_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); + return 0; +diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c +index 74c8803..359506e 100644 +--- a/drivers/gpu/drm/nouveau/nv17_tv.c ++++ b/drivers/gpu/drm/nouveau/nv17_tv.c +@@ -116,6 +116,20 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) + return sample; + } + ++static bool ++get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) ++{ ++ /* Zotac FX5200 */ ++ if ((dev->pdev->device == 0x0322) && ++ (dev->pdev->subsystem_vendor == 0x19da) && ++ (dev->pdev->subsystem_device == 0x2035)) { ++ *pin_mask = 0xc; ++ return false; ++ } ++ ++ return true; ++} ++ + static enum drm_connector_status + nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) + { +@@ -124,12 +138,20 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) + struct drm_mode_config *conf = &dev->mode_config; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct dcb_entry *dcb = tv_enc->base.dcb; ++ bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask); + +- if (dev_priv->chipset == 0x42 || +- dev_priv->chipset == 0x43) +- tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; +- else +- tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe; ++ if (nv04_dac_in_use(encoder)) ++ return connector_status_disconnected; ++ ++ if (reliable) { ++ if (dev_priv->chipset == 0x42 || ++ dev_priv->chipset == 0x43) ++ tv_enc->pin_mask = ++ nv42_tv_sample_load(encoder) >> 28 & 0xe; ++ else ++ tv_enc->pin_mask = ++ nv17_dac_sample_load(encoder) >> 28 & 0xe; ++ } + + switch (tv_enc->pin_mask) { + case 0x2: +@@ 
-154,7 +176,9 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) + conf->tv_subconnector_property, + tv_enc->subconnector); + +- if (tv_enc->subconnector) { ++ if (!reliable) { ++ return connector_status_unknown; ++ } else if (tv_enc->subconnector) { + NV_INFO(dev, "Load detected on output %c\n", + '@' + ffs(dcb->or)); + return connector_status_connected; +@@ -296,6 +320,9 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, + { + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + ++ if (nv04_dac_in_use(encoder)) ++ return false; ++ + if (tv_norm->kind == CTV_ENC_MODE) + adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock; + else +@@ -744,8 +771,10 @@ static struct drm_encoder_funcs nv17_tv_funcs = { + .destroy = nv17_tv_destroy, + }; + +-int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) ++int ++nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry) + { ++ struct drm_device *dev = connector->dev; + struct drm_encoder *encoder; + struct nv17_tv_encoder *tv_enc = NULL; + +@@ -774,5 +803,7 @@ int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + ++ nv17_tv_create_resources(encoder, connector); ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c +index d6fc0a8..191c15c 100644 +--- a/drivers/gpu/drm/nouveau/nv20_graph.c ++++ b/drivers/gpu/drm/nouveau/nv20_graph.c +@@ -370,68 +370,54 @@ nv20_graph_create_context(struct nouveau_channel *chan) + { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); +- unsigned int ctx_size; + unsigned int idoffs = 0x28/4; + int ret; + + switch (dev_priv->chipset) { + case 0x20: 
+- ctx_size = NV20_GRCTX_SIZE; + ctx_init = nv20_graph_context_init; + idoffs = 0; + break; + case 0x25: + case 0x28: +- ctx_size = NV25_GRCTX_SIZE; + ctx_init = nv25_graph_context_init; + break; + case 0x2a: +- ctx_size = NV2A_GRCTX_SIZE; + ctx_init = nv2a_graph_context_init; + idoffs = 0; + break; + case 0x30: + case 0x31: +- ctx_size = NV30_31_GRCTX_SIZE; + ctx_init = nv30_31_graph_context_init; + break; + case 0x34: +- ctx_size = NV34_GRCTX_SIZE; + ctx_init = nv34_graph_context_init; + break; + case 0x35: + case 0x36: +- ctx_size = NV35_36_GRCTX_SIZE; + ctx_init = nv35_36_graph_context_init; + break; + default: +- ctx_size = 0; +- ctx_init = nv35_36_graph_context_init; +- NV_ERROR(dev, "Please contact the devs if you want your NV%x" +- " card to work\n", dev_priv->chipset); +- return -ENOSYS; +- break; ++ BUG_ON(1); + } + +- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, +- NVOBJ_FLAG_ZERO_ALLOC, +- &chan->ramin_grctx); ++ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, ++ 16, NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramin_grctx); + if (ret) + return ret; + + /* Initialise default context values */ +- dev_priv->engine.instmem.prepare_access(dev, true); + ctx_init(dev, chan->ramin_grctx->gpuobj); + + /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ + nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs, + (chan->id << 24) | 0x1); /* CTX_USER */ + +- nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, +- chan->ramin_grctx->instance >> 4); +- +- dev_priv->engine.instmem.finish_access(dev); ++ nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, ++ chan->ramin_grctx->instance >> 4); + return 0; + } + +@@ -440,13 +426,12 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) + { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + + if (chan->ramin_grctx) + nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); + 
+- dev_priv->engine.instmem.prepare_access(dev, true); +- nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0); +- dev_priv->engine.instmem.finish_access(dev); ++ nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0); + } + + int +@@ -538,29 +523,44 @@ nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, + int + nv20_graph_init(struct drm_device *dev) + { +- struct drm_nouveau_private *dev_priv = +- (struct drm_nouveau_private *)dev->dev_private; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + uint32_t tmp, vramsz; + int ret, i; + ++ switch (dev_priv->chipset) { ++ case 0x20: ++ pgraph->grctx_size = NV20_GRCTX_SIZE; ++ break; ++ case 0x25: ++ case 0x28: ++ pgraph->grctx_size = NV25_GRCTX_SIZE; ++ break; ++ case 0x2a: ++ pgraph->grctx_size = NV2A_GRCTX_SIZE; ++ break; ++ default: ++ NV_ERROR(dev, "unknown chipset, disabling acceleration\n"); ++ pgraph->accel_blocked = true; ++ return 0; ++ } ++ + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); + +- if (!dev_priv->ctx_table) { ++ if (!pgraph->ctx_table) { + /* Create Context Pointer Table */ +- dev_priv->ctx_table_size = 32 * 4; +- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, +- dev_priv->ctx_table_size, 16, ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, +- &dev_priv->ctx_table); ++ &pgraph->ctx_table); + if (ret) + return ret; + } + + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, +- dev_priv->ctx_table->instance >> 4); ++ pgraph->ctx_table->instance >> 4); + + nv20_graph_rdi(dev); + +@@ -644,34 +644,52 @@ void + nv20_graph_takedown(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + +- nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); ++ 
nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table); + } + + int + nv30_graph_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + int ret, i; + ++ switch (dev_priv->chipset) { ++ case 0x30: ++ case 0x31: ++ pgraph->grctx_size = NV30_31_GRCTX_SIZE; ++ break; ++ case 0x34: ++ pgraph->grctx_size = NV34_GRCTX_SIZE; ++ break; ++ case 0x35: ++ case 0x36: ++ pgraph->grctx_size = NV35_36_GRCTX_SIZE; ++ break; ++ default: ++ NV_ERROR(dev, "unknown chipset, disabling acceleration\n"); ++ pgraph->accel_blocked = true; ++ return 0; ++ } ++ + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); + +- if (!dev_priv->ctx_table) { ++ if (!pgraph->ctx_table) { + /* Create Context Pointer Table */ +- dev_priv->ctx_table_size = 32 * 4; +- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, +- dev_priv->ctx_table_size, 16, ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, +- &dev_priv->ctx_table); ++ &pgraph->ctx_table); + if (ret) + return ret; + } + + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, +- dev_priv->ctx_table->instance >> 4); ++ pgraph->ctx_table->instance >> 4); + + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); +diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c +index 500ccfd..2b67f18 100644 +--- a/drivers/gpu/drm/nouveau/nv40_fifo.c ++++ b/drivers/gpu/drm/nouveau/nv40_fifo.c +@@ -48,7 +48,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + +- dev_priv->engine.instmem.prepare_access(dev, true); + nv_wi32(dev, fc + 0, chan->pushbuf_base); + nv_wi32(dev, fc + 4, chan->pushbuf_base); + nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); +@@ -61,7 +60,6 @@ 
nv40_fifo_create_context(struct nouveau_channel *chan) + 0x30000000 /* no idea.. */); + nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4); + nv_wi32(dev, fc + 60, 0x0001FFFF); +- dev_priv->engine.instmem.finish_access(dev); + + /* enable the fifo dma operation */ + nv_wr32(dev, NV04_PFIFO_MODE, +@@ -89,8 +87,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid) + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fc = NV40_RAMFC(chid), tmp, tmp2; + +- dev_priv->engine.instmem.prepare_access(dev, false); +- + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); + nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); +@@ -127,8 +123,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid) + nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76)); + nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80)); + +- dev_priv->engine.instmem.finish_access(dev); +- + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); + } +@@ -166,7 +160,6 @@ nv40_fifo_unload_context(struct drm_device *dev) + return 0; + fc = NV40_RAMFC(chid); + +- dev_priv->engine.instmem.prepare_access(dev, true); + nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); + nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); +@@ -200,7 +193,6 @@ nv40_fifo_unload_context(struct drm_device *dev) + tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16); + nv_wi32(dev, fc + 72, tmp); + #endif +- dev_priv->engine.instmem.finish_access(dev); + + nv40_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, +diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c +index 704a25d..ef550ce 100644 +--- a/drivers/gpu/drm/nouveau/nv40_graph.c ++++ b/drivers/gpu/drm/nouveau/nv40_graph.c +@@ -58,6 +58,7 @@ nv40_graph_create_context(struct 
nouveau_channel *chan) + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; ++ struct nouveau_grctx ctx = {}; + int ret; + + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, +@@ -67,20 +68,13 @@ nv40_graph_create_context(struct nouveau_channel *chan) + return ret; + + /* Initialise default context values */ +- dev_priv->engine.instmem.prepare_access(dev, true); +- if (!pgraph->ctxprog) { +- struct nouveau_grctx ctx = {}; +- +- ctx.dev = chan->dev; +- ctx.mode = NOUVEAU_GRCTX_VALS; +- ctx.data = chan->ramin_grctx->gpuobj; +- nv40_grctx_init(&ctx); +- } else { +- nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj); +- } ++ ctx.dev = chan->dev; ++ ctx.mode = NOUVEAU_GRCTX_VALS; ++ ctx.data = chan->ramin_grctx->gpuobj; ++ nv40_grctx_init(&ctx); ++ + nv_wo32(dev, chan->ramin_grctx->gpuobj, 0, + chan->ramin_grctx->gpuobj->im_pramin->start); +- dev_priv->engine.instmem.finish_access(dev); + return 0; + } + +@@ -238,7 +232,8 @@ nv40_graph_init(struct drm_device *dev) + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; +- uint32_t vramsz; ++ struct nouveau_grctx ctx = {}; ++ uint32_t vramsz, *cp; + int i, j; + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & +@@ -246,32 +241,22 @@ nv40_graph_init(struct drm_device *dev) + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | + NV_PMC_ENABLE_PGRAPH); + +- if (nouveau_ctxfw) { +- nouveau_grctx_prog_load(dev); +- dev_priv->engine.graph.grctx_size = 175 * 1024; +- } ++ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); ++ if (!cp) ++ return -ENOMEM; + +- if (!dev_priv->engine.graph.ctxprog) { +- struct nouveau_grctx ctx = {}; +- uint32_t *cp; ++ ctx.dev = dev; ++ ctx.mode = NOUVEAU_GRCTX_PROG; ++ ctx.data = cp; ++ ctx.ctxprog_max = 256; ++ nv40_grctx_init(&ctx); ++ 
dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; + +- cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); +- if (!cp) +- return -ENOMEM; ++ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ for (i = 0; i < ctx.ctxprog_len; i++) ++ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); + +- ctx.dev = dev; +- ctx.mode = NOUVEAU_GRCTX_PROG; +- ctx.data = cp; +- ctx.ctxprog_max = 256; +- nv40_grctx_init(&ctx); +- dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; +- +- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); +- for (i = 0; i < ctx.ctxprog_len; i++) +- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); +- +- kfree(cp); +- } ++ kfree(cp); + + /* No context present currently */ + nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); +@@ -407,7 +392,6 @@ nv40_graph_init(struct drm_device *dev) + + void nv40_graph_takedown(struct drm_device *dev) + { +- nouveau_grctx_fini(dev); + } + + struct nouveau_pgraph_object_class nv40_graph_grclass[] = { +diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c +index b4e4a3b..5d11ea1 100644 +--- a/drivers/gpu/drm/nouveau/nv50_crtc.c ++++ b/drivers/gpu/drm/nouveau/nv50_crtc.c +@@ -440,47 +440,15 @@ nv50_crtc_prepare(struct drm_crtc *crtc) + { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; +- struct drm_encoder *encoder; +- uint32_t dac = 0, sor = 0; + + NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + +- /* Disconnect all unused encoders. 
*/ +- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); +- +- if (!drm_helper_encoder_in_use(encoder)) +- continue; +- +- if (nv_encoder->dcb->type == OUTPUT_ANALOG || +- nv_encoder->dcb->type == OUTPUT_TV) +- dac |= (1 << nv_encoder->or); +- else +- sor |= (1 << nv_encoder->or); +- } +- +- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); +- +- if (nv_encoder->dcb->type == OUTPUT_ANALOG || +- nv_encoder->dcb->type == OUTPUT_TV) { +- if (dac & (1 << nv_encoder->or)) +- continue; +- } else { +- if (sor & (1 << nv_encoder->or)) +- continue; +- } +- +- nv_encoder->disconnect(nv_encoder); +- } +- + nv50_crtc_blank(nv_crtc, true); + } + + static void + nv50_crtc_commit(struct drm_crtc *crtc) + { +- struct drm_crtc *crtc2; + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; +@@ -491,20 +459,14 @@ nv50_crtc_commit(struct drm_crtc *crtc) + + nv50_crtc_blank(nv_crtc, false); + +- /* Explicitly blank all unused crtc's. 
*/ +- list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) { +- if (!drm_helper_crtc_in_use(crtc2)) +- nv50_crtc_blank(nouveau_crtc(crtc2), true); +- } +- + ret = RING_SPACE(evo, 2); + if (ret) { + NV_ERROR(dev, "no space while committing crtc\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); +- OUT_RING(evo, 0); +- FIRE_RING(evo); ++ OUT_RING (evo, 0); ++ FIRE_RING (evo); + } + + static bool +diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c +index 1fd9537..1bc0859 100644 +--- a/drivers/gpu/drm/nouveau/nv50_dac.c ++++ b/drivers/gpu/drm/nouveau/nv50_dac.c +@@ -37,22 +37,31 @@ + #include "nv50_display.h" + + static void +-nv50_dac_disconnect(struct nouveau_encoder *nv_encoder) ++nv50_dac_disconnect(struct drm_encoder *encoder) + { +- struct drm_device *dev = to_drm_encoder(nv_encoder)->dev; ++ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); ++ struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + int ret; + ++ if (!nv_encoder->crtc) ++ return; ++ nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); ++ + NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or); + +- ret = RING_SPACE(evo, 2); ++ ret = RING_SPACE(evo, 4); + if (ret) { + NV_ERROR(dev, "no space while disconnecting DAC\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); +- OUT_RING(evo, 0); ++ OUT_RING (evo, 0); ++ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); ++ OUT_RING (evo, 0); ++ ++ nv_encoder->crtc = NULL; + } + + static enum drm_connector_status +@@ -213,7 +222,8 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + uint32_t mode_ctl = 0, mode_ctl2 = 0; + int ret; + +- NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or); ++ NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n", ++ nv_encoder->or, nv_encoder->dcb->type, crtc->index); + + nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); + +@@ -243,6 
+253,14 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); + OUT_RING(evo, mode_ctl); + OUT_RING(evo, mode_ctl2); ++ ++ nv_encoder->crtc = encoder->crtc; ++} ++ ++static struct drm_crtc * ++nv50_dac_crtc_get(struct drm_encoder *encoder) ++{ ++ return nouveau_encoder(encoder)->crtc; + } + + static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { +@@ -253,7 +271,9 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { + .prepare = nv50_dac_prepare, + .commit = nv50_dac_commit, + .mode_set = nv50_dac_mode_set, +- .detect = nv50_dac_detect ++ .get_crtc = nv50_dac_crtc_get, ++ .detect = nv50_dac_detect, ++ .disable = nv50_dac_disconnect + }; + + static void +@@ -275,14 +295,11 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = { + }; + + int +-nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) ++nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry) + { + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + +- NV_DEBUG_KMS(dev, "\n"); +- NV_INFO(dev, "Detected a DAC output\n"); +- + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; +@@ -291,14 +308,14 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + +- nv_encoder->disconnect = nv50_dac_disconnect; +- +- drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs, ++ drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs, + DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; ++ ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c +index e6a44af..7d59e91 100644 +--- 
a/drivers/gpu/drm/nouveau/nv50_display.c ++++ b/drivers/gpu/drm/nouveau/nv50_display.c +@@ -71,14 +71,13 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, + return ret; + } + +- dev_priv->engine.instmem.prepare_access(dev, true); + nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); + nv_wo32(dev, obj, 1, limit); + nv_wo32(dev, obj, 2, offset); + nv_wo32(dev, obj, 3, 0x00000000); + nv_wo32(dev, obj, 4, 0x00000000); + nv_wo32(dev, obj, 5, 0x00010000); +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + + return 0; + } +@@ -110,8 +109,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) + return ret; + } + +- ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj-> +- im_pramin->start, 32768); ++ ret = drm_mm_init(&chan->ramin_heap, ++ chan->ramin->gpuobj->im_pramin->start, 32768); + if (ret) { + NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); + nv50_evo_channel_del(pchan); +@@ -465,6 +464,7 @@ int nv50_display_create(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct dcb_table *dcb = &dev_priv->vbios.dcb; ++ struct drm_connector *connector, *ct; + int ret, i; + + NV_DEBUG_KMS(dev, "\n"); +@@ -507,14 +507,18 @@ int nv50_display_create(struct drm_device *dev) + continue; + } + ++ connector = nouveau_connector_create(dev, entry->connector); ++ if (IS_ERR(connector)) ++ continue; ++ + switch (entry->type) { + case OUTPUT_TMDS: + case OUTPUT_LVDS: + case OUTPUT_DP: +- nv50_sor_create(dev, entry); ++ nv50_sor_create(connector, entry); + break; + case OUTPUT_ANALOG: +- nv50_dac_create(dev, entry); ++ nv50_dac_create(connector, entry); + break; + default: + NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); +@@ -522,11 +526,13 @@ int nv50_display_create(struct drm_device *dev) + } + } + +- for (i = 0 ; i < dcb->connector.entries; i++) { +- if (i != 0 && 
dcb->connector.entry[i].index2 == +- dcb->connector.entry[i - 1].index2) +- continue; +- nouveau_connector_create(dev, &dcb->connector.entry[i]); ++ list_for_each_entry_safe(connector, ct, ++ &dev->mode_config.connector_list, head) { ++ if (!connector->encoder_ids[0]) { ++ NV_WARN(dev, "%s has no encoders, removing\n", ++ drm_get_connector_name(connector)); ++ connector->funcs->destroy(connector); ++ } + } + + ret = nv50_display_init(dev); +@@ -552,131 +558,28 @@ int nv50_display_destroy(struct drm_device *dev) + return 0; + } + +-static inline uint32_t +-nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t mc; +- +- if (sor) { +- if (dev_priv->chipset < 0x90 || +- dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) +- mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or)); +- else +- mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or)); +- } else { +- mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or)); +- } +- +- return mc; +-} +- +-static int +-nv50_display_irq_head(struct drm_device *dev, int *phead, +- struct dcb_entry **pdcbent) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL); +- uint32_t dac = 0, sor = 0; +- int head, i, or = 0, type = OUTPUT_ANY; +- +- /* We're assuming that head 0 *or* head 1 will be active here, +- * and not both. I'm not sure if the hw will even signal both +- * ever, but it definitely shouldn't for us as we commit each +- * CRTC separately, and submission will be blocked by the GPU +- * until we handle each in turn. +- */ +- NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); +- head = ffs((unk30 >> 9) & 3) - 1; +- if (head < 0) +- return -EINVAL; +- +- /* This assumes CRTCs are never bound to multiple encoders, which +- * should be the case. 
+- */ +- for (i = 0; i < 3 && type == OUTPUT_ANY; i++) { +- uint32_t mc = nv50_display_mode_ctrl(dev, false, i); +- if (!(mc & (1 << head))) +- continue; +- +- switch ((mc >> 8) & 0xf) { +- case 0: type = OUTPUT_ANALOG; break; +- case 1: type = OUTPUT_TV; break; +- default: +- NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac); +- return -1; +- } +- +- or = i; +- } +- +- for (i = 0; i < 4 && type == OUTPUT_ANY; i++) { +- uint32_t mc = nv50_display_mode_ctrl(dev, true, i); +- if (!(mc & (1 << head))) +- continue; +- +- switch ((mc >> 8) & 0xf) { +- case 0: type = OUTPUT_LVDS; break; +- case 1: type = OUTPUT_TMDS; break; +- case 2: type = OUTPUT_TMDS; break; +- case 5: type = OUTPUT_TMDS; break; +- case 8: type = OUTPUT_DP; break; +- case 9: type = OUTPUT_DP; break; +- default: +- NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor); +- return -1; +- } +- +- or = i; +- } +- +- NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or); +- if (type == OUTPUT_ANY) { +- NV_ERROR(dev, "unknown encoder!!\n"); +- return -1; +- } +- +- for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { +- struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i]; +- +- if (dcbent->type != type) +- continue; +- +- if (!(dcbent->or & (1 << or))) +- continue; +- +- *phead = head; +- *pdcbent = dcbent; +- return 0; +- } +- +- NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or); +- return 0; +-} +- +-static uint32_t +-nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, +- int pxclk) ++static u16 ++nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb, ++ u32 mc, int pxclk) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_connector *nv_connector = NULL; + struct drm_encoder *encoder; + struct nvbios *bios = &dev_priv->vbios; +- uint32_t mc, script = 0, or; ++ u32 script = 0, or; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + +- if 
(nv_encoder->dcb != dcbent) ++ if (nv_encoder->dcb != dcb) + continue; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + break; + } + +- or = ffs(dcbent->or) - 1; +- mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or); +- switch (dcbent->type) { ++ or = ffs(dcb->or) - 1; ++ switch (dcb->type) { + case OUTPUT_LVDS: + script = (mc >> 8) & 0xf; + if (bios->fp_no_ddc) { +@@ -767,17 +670,88 @@ nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) + static void + nv50_display_unk10_handler(struct drm_device *dev) + { +- struct dcb_entry *dcbent; +- int head, ret; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ u32 unk30 = nv_rd32(dev, 0x610030), mc; ++ int i, crtc, or, type = OUTPUT_ANY; + +- ret = nv50_display_irq_head(dev, &head, &dcbent); +- if (ret) +- goto ack; ++ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); ++ dev_priv->evo_irq.dcb = NULL; + + nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8); + +- nouveau_bios_run_display_table(dev, dcbent, 0, -1); ++ /* Determine which CRTC we're dealing with, only 1 ever will be ++ * signalled at the same time with the current nouveau code. 
++ */ ++ crtc = ffs((unk30 & 0x00000060) >> 5) - 1; ++ if (crtc < 0) ++ goto ack; ++ ++ /* Nothing needs to be done for the encoder */ ++ crtc = ffs((unk30 & 0x00000180) >> 7) - 1; ++ if (crtc < 0) ++ goto ack; ++ ++ /* Find which encoder was connected to the CRTC */ ++ for (i = 0; type == OUTPUT_ANY && i < 3; i++) { ++ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); ++ NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); ++ if (!(mc & (1 << crtc))) ++ continue; ++ ++ switch ((mc & 0x00000f00) >> 8) { ++ case 0: type = OUTPUT_ANALOG; break; ++ case 1: type = OUTPUT_TV; break; ++ default: ++ NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); ++ goto ack; ++ } ++ ++ or = i; ++ } ++ ++ for (i = 0; type == OUTPUT_ANY && i < 4; i++) { ++ if (dev_priv->chipset < 0x90 || ++ dev_priv->chipset == 0x92 || ++ dev_priv->chipset == 0xa0) ++ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); ++ else ++ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); + ++ NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); ++ if (!(mc & (1 << crtc))) ++ continue; ++ ++ switch ((mc & 0x00000f00) >> 8) { ++ case 0: type = OUTPUT_LVDS; break; ++ case 1: type = OUTPUT_TMDS; break; ++ case 2: type = OUTPUT_TMDS; break; ++ case 5: type = OUTPUT_TMDS; break; ++ case 8: type = OUTPUT_DP; break; ++ case 9: type = OUTPUT_DP; break; ++ default: ++ NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); ++ goto ack; ++ } ++ ++ or = i; ++ } ++ ++ /* There was no encoder to disable */ ++ if (type == OUTPUT_ANY) ++ goto ack; ++ ++ /* Disable the encoder */ ++ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { ++ struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; ++ ++ if (dcb->type == type && (dcb->or & (1 << or))) { ++ nouveau_bios_run_display_table(dev, dcb, 0, -1); ++ dev_priv->evo_irq.dcb = dcb; ++ goto ack; ++ } ++ } ++ ++ NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); + ack: + nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); + nv_wr32(dev, 0x610030, 0x80000000); 
+@@ -817,33 +791,103 @@ nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb) + static void + nv50_display_unk20_handler(struct drm_device *dev) + { +- struct dcb_entry *dcbent; +- uint32_t tmp, pclk, script; +- int head, or, ret; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc; ++ struct dcb_entry *dcb; ++ int i, crtc, or, type = OUTPUT_ANY; + +- ret = nv50_display_irq_head(dev, &head, &dcbent); +- if (ret) ++ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); ++ dcb = dev_priv->evo_irq.dcb; ++ if (dcb) { ++ nouveau_bios_run_display_table(dev, dcb, 0, -2); ++ dev_priv->evo_irq.dcb = NULL; ++ } ++ ++ /* CRTC clock change requested? */ ++ crtc = ffs((unk30 & 0x00000600) >> 9) - 1; ++ if (crtc >= 0) { ++ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)); ++ pclk &= 0x003fffff; ++ ++ nv50_crtc_set_clock(dev, crtc, pclk); ++ ++ tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc)); ++ tmp &= ~0x000000f; ++ nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp); ++ } ++ ++ /* Nothing needs to be done for the encoder */ ++ crtc = ffs((unk30 & 0x00000180) >> 7) - 1; ++ if (crtc < 0) + goto ack; +- or = ffs(dcbent->or) - 1; +- pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; +- script = nv50_display_script_select(dev, dcbent, pclk); ++ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff; + +- NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk); ++ /* Find which encoder is connected to the CRTC */ ++ for (i = 0; type == OUTPUT_ANY && i < 3; i++) { ++ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i)); ++ NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); ++ if (!(mc & (1 << crtc))) ++ continue; + +- if (dcbent->type != OUTPUT_DP) +- nouveau_bios_run_display_table(dev, dcbent, 0, -2); ++ switch ((mc & 0x00000f00) >> 8) { ++ case 0: type = OUTPUT_ANALOG; break; ++ case 1: type = OUTPUT_TV; break; ++ default: ++ NV_ERROR(dev, "invalid mc, DAC-%d: 
0x%08x\n", i, mc); ++ goto ack; ++ } + +- nv50_crtc_set_clock(dev, head, pclk); ++ or = i; ++ } + +- nouveau_bios_run_display_table(dev, dcbent, script, pclk); ++ for (i = 0; type == OUTPUT_ANY && i < 4; i++) { ++ if (dev_priv->chipset < 0x90 || ++ dev_priv->chipset == 0x92 || ++ dev_priv->chipset == 0xa0) ++ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i)); ++ else ++ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i)); + +- nv50_display_unk20_dp_hack(dev, dcbent); ++ NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); ++ if (!(mc & (1 << crtc))) ++ continue; + +- tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); +- tmp &= ~0x000000f; +- nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); ++ switch ((mc & 0x00000f00) >> 8) { ++ case 0: type = OUTPUT_LVDS; break; ++ case 1: type = OUTPUT_TMDS; break; ++ case 2: type = OUTPUT_TMDS; break; ++ case 5: type = OUTPUT_TMDS; break; ++ case 8: type = OUTPUT_DP; break; ++ case 9: type = OUTPUT_DP; break; ++ default: ++ NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); ++ goto ack; ++ } ++ ++ or = i; ++ } ++ ++ if (type == OUTPUT_ANY) ++ goto ack; ++ ++ /* Enable the encoder */ ++ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { ++ dcb = &dev_priv->vbios.dcb.entry[i]; ++ if (dcb->type == type && (dcb->or & (1 << or))) ++ break; ++ } ++ ++ if (i == dev_priv->vbios.dcb.entries) { ++ NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); ++ goto ack; ++ } ++ ++ script = nv50_display_script_select(dev, dcb, mc, pclk); ++ nouveau_bios_run_display_table(dev, dcb, script, pclk); ++ ++ nv50_display_unk20_dp_hack(dev, dcb); + +- if (dcbent->type != OUTPUT_ANALOG) { ++ if (dcb->type != OUTPUT_ANALOG) { + tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); + tmp &= ~0x00000f0f; + if (script & 0x0100) +@@ -853,24 +897,61 @@ nv50_display_unk20_handler(struct drm_device *dev) + nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); + } + ++ dev_priv->evo_irq.dcb = dcb; ++ dev_priv->evo_irq.pclk = pclk; ++ 
dev_priv->evo_irq.script = script; ++ + ack: + nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20); + nv_wr32(dev, 0x610030, 0x80000000); + } + ++/* If programming a TMDS output on a SOR that can also be configured for ++ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off. ++ * ++ * It looks like the VBIOS TMDS scripts make an attempt at this, however, ++ * the VBIOS scripts on at least one board I have only switch it off on ++ * link 0, causing a blank display if the output has previously been ++ * programmed for DisplayPort. ++ */ ++static void ++nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb) ++{ ++ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); ++ struct drm_encoder *encoder; ++ u32 tmp; ++ ++ if (dcb->type != OUTPUT_TMDS) ++ return; ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); ++ ++ if (nv_encoder->dcb->type == OUTPUT_DP && ++ nv_encoder->dcb->or & (1 << or)) { ++ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); ++ tmp &= ~NV50_SOR_DP_CTRL_ENABLED; ++ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); ++ break; ++ } ++ } ++} ++ + static void + nv50_display_unk40_handler(struct drm_device *dev) + { +- struct dcb_entry *dcbent; +- int head, pclk, script, ret; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct dcb_entry *dcb = dev_priv->evo_irq.dcb; ++ u16 script = dev_priv->evo_irq.script; ++ u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk; + +- ret = nv50_display_irq_head(dev, &head, &dcbent); +- if (ret) ++ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); ++ dev_priv->evo_irq.dcb = NULL; ++ if (!dcb) + goto ack; +- pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; +- script = nv50_display_script_select(dev, dcbent, pclk); + +- nouveau_bios_run_display_table(dev, dcbent, script, -pclk); ++ nouveau_bios_run_display_table(dev, dcb, script, -pclk); ++ 
nv50_display_unk40_dp_set_tmds(dev, dcb); + + ack: + nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); +diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c +index e20c0e2..fb0281a 100644 +--- a/drivers/gpu/drm/nouveau/nv50_fifo.c ++++ b/drivers/gpu/drm/nouveau/nv50_fifo.c +@@ -28,41 +28,33 @@ + #include "drm.h" + #include "nouveau_drv.h" + +-struct nv50_fifo_priv { +- struct nouveau_gpuobj_ref *thingo[2]; +- int cur_thingo; +-}; +- +-#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) +- + static void +-nv50_fifo_init_thingo(struct drm_device *dev) ++nv50_fifo_playlist_update(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; ++ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_gpuobj_ref *cur; + int i, nr; + + NV_DEBUG(dev, "\n"); + +- cur = priv->thingo[priv->cur_thingo]; +- priv->cur_thingo = !priv->cur_thingo; ++ cur = pfifo->playlist[pfifo->cur_playlist]; ++ pfifo->cur_playlist = !pfifo->cur_playlist; + + /* We never schedule channel 0 or 127 */ +- dev_priv->engine.instmem.prepare_access(dev, true); + for (i = 1, nr = 0; i < 127; i++) { + if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) + nv_wo32(dev, cur->gpuobj, nr++, i); + } +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + + nv_wr32(dev, 0x32f4, cur->instance >> 12); + nv_wr32(dev, 0x32ec, nr); + nv_wr32(dev, 0x2500, 0x101); + } + +-static int +-nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt) ++static void ++nv50_fifo_channel_enable(struct drm_device *dev, int channel) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->fifos[channel]; +@@ -70,37 +62,28 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt) + + NV_DEBUG(dev, "ch%d\n", channel); + +- if (!chan->ramfc) +- return -EINVAL; +- +- if 
(IS_G80) ++ if (dev_priv->chipset == 0x50) + inst = chan->ramfc->instance >> 12; + else + inst = chan->ramfc->instance >> 8; +- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), +- inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); + +- if (!nt) +- nv50_fifo_init_thingo(dev); +- return 0; ++ nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | ++ NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); + } + + static void +-nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt) ++nv50_fifo_channel_disable(struct drm_device *dev, int channel) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t inst; + +- NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt); ++ NV_DEBUG(dev, "ch%d\n", channel); + +- if (IS_G80) ++ if (dev_priv->chipset == 0x50) + inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; + else + inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; + nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst); +- +- if (!nt) +- nv50_fifo_init_thingo(dev); + } + + static void +@@ -133,12 +116,12 @@ nv50_fifo_init_context_table(struct drm_device *dev) + + for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { + if (dev_priv->fifos[i]) +- nv50_fifo_channel_enable(dev, i, true); ++ nv50_fifo_channel_enable(dev, i); + else +- nv50_fifo_channel_disable(dev, i, true); ++ nv50_fifo_channel_disable(dev, i); + } + +- nv50_fifo_init_thingo(dev); ++ nv50_fifo_playlist_update(dev); + } + + static void +@@ -162,41 +145,38 @@ nv50_fifo_init_regs(struct drm_device *dev) + nv_wr32(dev, 0x3270, 0); + + /* Enable dummy channels setup by nv50_instmem.c */ +- nv50_fifo_channel_enable(dev, 0, true); +- nv50_fifo_channel_enable(dev, 127, true); ++ nv50_fifo_channel_enable(dev, 0); ++ nv50_fifo_channel_enable(dev, 127); + } + + int + nv50_fifo_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nv50_fifo_priv *priv; ++ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + int ret; + + NV_DEBUG(dev, "\n"); + +- priv = dev_priv->engine.fifo.priv; +- 
if (priv) { +- priv->cur_thingo = !priv->cur_thingo; ++ if (pfifo->playlist[0]) { ++ pfifo->cur_playlist = !pfifo->cur_playlist; + goto just_reset; + } + +- priv = kzalloc(sizeof(*priv), GFP_KERNEL); +- if (!priv) +- return -ENOMEM; +- dev_priv->engine.fifo.priv = priv; +- + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, +- NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]); ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &pfifo->playlist[0]); + if (ret) { +- NV_ERROR(dev, "error creating thingo0: %d\n", ret); ++ NV_ERROR(dev, "error creating playlist 0: %d\n", ret); + return ret; + } + + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, +- NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]); ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &pfifo->playlist[1]); + if (ret) { +- NV_ERROR(dev, "error creating thingo1: %d\n", ret); ++ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); ++ NV_ERROR(dev, "error creating playlist 1: %d\n", ret); + return ret; + } + +@@ -216,18 +196,15 @@ void + nv50_fifo_takedown(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; ++ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + + NV_DEBUG(dev, "\n"); + +- if (!priv) ++ if (!pfifo->playlist[0]) + return; + +- nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); +- nouveau_gpuobj_ref_del(dev, &priv->thingo[1]); +- +- dev_priv->engine.fifo.priv = NULL; +- kfree(priv); ++ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); ++ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]); + } + + int +@@ -248,7 +225,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) + + NV_DEBUG(dev, "ch%d\n", chan->id); + +- if (IS_G80) { ++ if (dev_priv->chipset == 0x50) { + uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start; + uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start; + +@@ -281,10 +258,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan) + + spin_lock_irqsave(&dev_priv->context_switch_lock, 
flags); + +- dev_priv->engine.instmem.prepare_access(dev, true); +- + nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); +- nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); ++ nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ | ++ (4 << 24) /* SEARCH_FULL */ | ++ (chan->ramht->instance >> 4)); + nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); + nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); + nv_wo32(dev, ramfc, 0x40/4, 0x00000000); +@@ -295,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) + chan->dma.ib_base * 4); + nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16); + +- if (!IS_G80) { ++ if (dev_priv->chipset != 0x50) { + nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); + nv_wo32(dev, chan->ramin->gpuobj, 1, + chan->ramfc->instance >> 8); +@@ -304,16 +281,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan) + nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12); + } + +- dev_priv->engine.instmem.finish_access(dev); +- +- ret = nv50_fifo_channel_enable(dev, chan->id, false); +- if (ret) { +- NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret); +- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); +- nouveau_gpuobj_ref_del(dev, &chan->ramfc); +- return ret; +- } ++ dev_priv->engine.instmem.flush(dev); + ++ nv50_fifo_channel_enable(dev, chan->id); ++ nv50_fifo_playlist_update(dev); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + return 0; + } +@@ -328,11 +299,12 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) + + /* This will ensure the channel is seen as disabled. 
*/ + chan->ramfc = NULL; +- nv50_fifo_channel_disable(dev, chan->id, false); ++ nv50_fifo_channel_disable(dev, chan->id); + + /* Dummy channel, also used on ch 127 */ + if (chan->id == 0) +- nv50_fifo_channel_disable(dev, 127, false); ++ nv50_fifo_channel_disable(dev, 127); ++ nv50_fifo_playlist_update(dev); + + nouveau_gpuobj_ref_del(dev, &ramfc); + nouveau_gpuobj_ref_del(dev, &chan->cache); +@@ -349,8 +321,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan) + + NV_DEBUG(dev, "ch%d\n", chan->id); + +- dev_priv->engine.instmem.prepare_access(dev, false); +- + nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4)); + nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4)); + nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4)); +@@ -396,7 +366,7 @@ nv50_fifo_load_context(struct nouveau_channel *chan) + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); + + /* guessing that all the 0x34xx regs aren't on NV50 */ +- if (!IS_G80) { ++ if (dev_priv->chipset != 0x50) { + nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4)); + nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4)); + nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4)); +@@ -404,8 +374,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan) + nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4)); + } + +- dev_priv->engine.instmem.finish_access(dev); +- + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); + return 0; + } +@@ -434,8 +402,6 @@ nv50_fifo_unload_context(struct drm_device *dev) + ramfc = chan->ramfc->gpuobj; + cache = chan->cache->gpuobj; + +- dev_priv->engine.instmem.prepare_access(dev, true); +- + nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330)); + nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334)); + nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240)); +@@ -482,7 +448,7 @@ nv50_fifo_unload_context(struct drm_device *dev) + } + + /* guessing that all the 0x34xx regs aren't on NV50 */ +- if (!IS_G80) { ++ if (dev_priv->chipset != 0x50) { + nv_wo32(dev, ramfc, 0x84/4, ptr >> 1); + nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 
0x340c)); + nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400)); +@@ -491,7 +457,7 @@ nv50_fifo_unload_context(struct drm_device *dev) + nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410)); + } + +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + + /*XXX: probably reload ch127 (NULL) state back too */ + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127); +diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c +index b203d06..1413028 100644 +--- a/drivers/gpu/drm/nouveau/nv50_graph.c ++++ b/drivers/gpu/drm/nouveau/nv50_graph.c +@@ -30,8 +30,6 @@ + + #include "nouveau_grctx.h" + +-#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) +- + static void + nv50_graph_init_reset(struct drm_device *dev) + { +@@ -103,37 +101,33 @@ static int + nv50_graph_init_ctxctl(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_grctx ctx = {}; ++ uint32_t *cp; ++ int i; + + NV_DEBUG(dev, "\n"); + +- if (nouveau_ctxfw) { +- nouveau_grctx_prog_load(dev); +- dev_priv->engine.graph.grctx_size = 0x70000; ++ cp = kmalloc(512 * 4, GFP_KERNEL); ++ if (!cp) { ++ NV_ERROR(dev, "failed to allocate ctxprog\n"); ++ dev_priv->engine.graph.accel_blocked = true; ++ return 0; + } +- if (!dev_priv->engine.graph.ctxprog) { +- struct nouveau_grctx ctx = {}; +- uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL); +- int i; +- if (!cp) { +- NV_ERROR(dev, "Couldn't alloc ctxprog! 
Disabling acceleration.\n"); +- dev_priv->engine.graph.accel_blocked = true; +- return 0; +- } +- ctx.dev = dev; +- ctx.mode = NOUVEAU_GRCTX_PROG; +- ctx.data = cp; +- ctx.ctxprog_max = 512; +- if (!nv50_grctx_init(&ctx)) { +- dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; +- +- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); +- for (i = 0; i < ctx.ctxprog_len; i++) +- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); +- } else { +- dev_priv->engine.graph.accel_blocked = true; +- } +- kfree(cp); ++ ++ ctx.dev = dev; ++ ctx.mode = NOUVEAU_GRCTX_PROG; ++ ctx.data = cp; ++ ctx.ctxprog_max = 512; ++ if (!nv50_grctx_init(&ctx)) { ++ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; ++ ++ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ for (i = 0; i < ctx.ctxprog_len; i++) ++ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); ++ } else { ++ dev_priv->engine.graph.accel_blocked = true; + } ++ kfree(cp); + + nv_wr32(dev, 0x400320, 4); + nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); +@@ -164,7 +158,6 @@ void + nv50_graph_takedown(struct drm_device *dev) + { + NV_DEBUG(dev, "\n"); +- nouveau_grctx_fini(dev); + } + + void +@@ -212,8 +205,9 @@ nv50_graph_create_context(struct nouveau_channel *chan) + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; +- struct nouveau_gpuobj *ctx; ++ struct nouveau_gpuobj *obj; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; ++ struct nouveau_grctx ctx = {}; + int hdr, ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); +@@ -223,10 +217,9 @@ nv50_graph_create_context(struct nouveau_channel *chan) + NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); + if (ret) + return ret; +- ctx = chan->ramin_grctx->gpuobj; ++ obj = chan->ramin_grctx->gpuobj; + +- hdr = IS_G80 ? 0x200 : 0x20; +- dev_priv->engine.instmem.prepare_access(dev, true); ++ hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; + nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); + nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + + pgraph->grctx_size - 1); +@@ -234,21 +227,15 @@ nv50_graph_create_context(struct nouveau_channel *chan) + nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); + nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); + nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000); +- dev_priv->engine.instmem.finish_access(dev); +- +- dev_priv->engine.instmem.prepare_access(dev, true); +- if (!pgraph->ctxprog) { +- struct nouveau_grctx ctx = {}; +- ctx.dev = chan->dev; +- ctx.mode = NOUVEAU_GRCTX_VALS; +- ctx.data = chan->ramin_grctx->gpuobj; +- nv50_grctx_init(&ctx); +- } else { +- nouveau_grctx_vals_load(dev, ctx); +- } +- nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); +- dev_priv->engine.instmem.finish_access(dev); + ++ ctx.dev = chan->dev; ++ ctx.mode = NOUVEAU_GRCTX_VALS; ++ ctx.data = obj; ++ nv50_grctx_init(&ctx); ++ ++ nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12); ++ ++ dev_priv->engine.instmem.flush(dev); + return 0; + } + +@@ -257,17 +244,16 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) + { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; +- int i, hdr = IS_G80 ? 0x200 : 0x20; ++ int i, hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + if (!chan->ramin || !chan->ramin->gpuobj) + return; + +- dev_priv->engine.instmem.prepare_access(dev, true); + for (i = hdr; i < hdr + 24; i += 4) + nv_wo32(dev, chan->ramin->gpuobj, i/4, 0); +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + + nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); + } +diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c +index 5f21df3..0c8a6f2 100644 +--- a/drivers/gpu/drm/nouveau/nv50_instmem.c ++++ b/drivers/gpu/drm/nouveau/nv50_instmem.c +@@ -35,8 +35,6 @@ struct nv50_instmem_priv { + struct nouveau_gpuobj_ref *pramin_pt; + struct nouveau_gpuobj_ref *pramin_bar; + struct nouveau_gpuobj_ref *fb_bar; +- +- bool last_access_wr; + }; + + #define NV50_INSTMEM_PAGE_SHIFT 12 +@@ -147,7 +145,7 @@ nv50_instmem_init(struct drm_device *dev) + if (ret) + return ret; + +- if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) ++ if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base)) + return -ENOMEM; + + /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ +@@ -262,23 +260,18 @@ nv50_instmem_init(struct drm_device *dev) + + /* Assume that praying isn't enough, check that we can re-read the + * entire fake channel back from the PRAMIN BAR */ +- dev_priv->engine.instmem.prepare_access(dev, false); + for (i = 0; i < c_size; i += 4) { + if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) { + NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n", + i); +- dev_priv->engine.instmem.finish_access(dev); + return -EINVAL; + } + } +- dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700); + + /* Global PRAMIN heap */ +- if (nouveau_mem_init_heap(&dev_priv->ramin_heap, +- c_size, dev_priv->ramin_size - c_size)) { +- dev_priv->ramin_heap = NULL; ++ if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) { + NV_ERROR(dev, "Failed 
to init RAMIN heap\n"); + } + +@@ -321,7 +314,7 @@ nv50_instmem_takedown(struct drm_device *dev) + nouveau_gpuobj_del(dev, &chan->vm_pd); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref_del(dev, &chan->ramin); +- nouveau_mem_takedown(&chan->ramin_heap); ++ drm_mm_takedown(&chan->ramin_heap); + + dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; + kfree(chan); +@@ -436,14 +429,14 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) + if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) + return -EINVAL; + +- NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", ++ NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", + gpuobj->im_pramin->start, gpuobj->im_pramin->size); + + pte = (gpuobj->im_pramin->start >> 12) << 1; + pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; + vram = gpuobj->im_backing_start; + +- NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", ++ NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", + gpuobj->im_pramin->start, pte, pte_end); + NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); + +@@ -453,27 +446,15 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) + vram |= 0x30; + } + +- dev_priv->engine.instmem.prepare_access(dev, true); + while (pte < pte_end) { + nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); + nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); + vram += NV50_INSTMEM_PAGE_SIZE; + } +- dev_priv->engine.instmem.finish_access(dev); +- +- nv_wr32(dev, 0x100c80, 0x00040001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } ++ dev_priv->engine.instmem.flush(dev); + +- nv_wr32(dev, 0x100c80, 0x00060001); +- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); +- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); +- return -EBUSY; +- } ++ 
nv50_vm_flush(dev, 4); ++ nv50_vm_flush(dev, 6); + + gpuobj->im_bound = 1; + return 0; +@@ -492,36 +473,28 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) + pte = (gpuobj->im_pramin->start >> 12) << 1; + pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; + +- dev_priv->engine.instmem.prepare_access(dev, true); + while (pte < pte_end) { + nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); + nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); + } +- dev_priv->engine.instmem.finish_access(dev); ++ dev_priv->engine.instmem.flush(dev); + + gpuobj->im_bound = 0; + return 0; + } + + void +-nv50_instmem_prepare_access(struct drm_device *dev, bool write) ++nv50_instmem_flush(struct drm_device *dev) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; +- +- priv->last_access_wr = write; ++ nv_wr32(dev, 0x070000, 0x00000001); ++ if (!nv_wait(0x070000, 0x00000001, 0x00000000)) ++ NV_ERROR(dev, "PRAMIN flush timeout\n"); + } + + void +-nv50_instmem_finish_access(struct drm_device *dev) ++nv50_vm_flush(struct drm_device *dev, int engine) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; +- +- if (priv->last_access_wr) { +- nv_wr32(dev, 0x070000, 0x00000001); +- if (!nv_wait(0x070000, 0x00000001, 0x00000000)) +- NV_ERROR(dev, "PRAMIN flush timeout\n"); +- } ++ nv_wr32(dev, 0x100c80, (engine << 16) | 1); ++ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) ++ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); + } +- +diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c +index 812778d..bcd4cf8 100644 +--- a/drivers/gpu/drm/nouveau/nv50_sor.c ++++ b/drivers/gpu/drm/nouveau/nv50_sor.c +@@ -37,52 +37,32 @@ + #include "nv50_display.h" + + static void +-nv50_sor_disconnect(struct nouveau_encoder *nv_encoder) ++nv50_sor_disconnect(struct drm_encoder 
*encoder) + { +- struct drm_device *dev = to_drm_encoder(nv_encoder)->dev; ++ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); ++ struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + int ret; + ++ if (!nv_encoder->crtc) ++ return; ++ nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); ++ + NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or); + +- ret = RING_SPACE(evo, 2); ++ ret = RING_SPACE(evo, 4); + if (ret) { + NV_ERROR(dev, "no space while disconnecting SOR\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); +- OUT_RING(evo, 0); +-} +- +-static void +-nv50_sor_dp_link_train(struct drm_encoder *encoder) +-{ +- struct drm_device *dev = encoder->dev; +- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); +- struct bit_displayport_encoder_table *dpe; +- int dpe_headerlen; +- +- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); +- if (!dpe) { +- NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); +- return; +- } ++ OUT_RING (evo, 0); ++ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); ++ OUT_RING (evo, 0); + +- if (dpe->script0) { +- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); +- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), +- nv_encoder->dcb); +- } +- +- if (!nouveau_dp_link_train(encoder)) +- NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or); +- +- if (dpe->script1) { +- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); +- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), +- nv_encoder->dcb); +- } ++ nv_encoder->crtc = NULL; ++ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + } + + static void +@@ -94,14 +74,16 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) + uint32_t val; + int or = nv_encoder->or; + +- NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); ++ NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", 
or, nv_encoder->dcb->type, mode); + + nv_encoder->last_dpms = mode; + list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nvenc = nouveau_encoder(enc); + + if (nvenc == nv_encoder || +- nvenc->disconnect != nv50_sor_disconnect || ++ (nvenc->dcb->type != OUTPUT_TMDS && ++ nvenc->dcb->type != OUTPUT_LVDS && ++ nvenc->dcb->type != OUTPUT_DP) || + nvenc->dcb->or != nv_encoder->dcb->or) + continue; + +@@ -133,8 +115,22 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) + nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or))); + } + +- if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON) +- nv50_sor_dp_link_train(encoder); ++ if (nv_encoder->dcb->type == OUTPUT_DP) { ++ struct nouveau_i2c_chan *auxch; ++ ++ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); ++ if (!auxch) ++ return; ++ ++ if (mode == DRM_MODE_DPMS_ON) { ++ u8 status = DP_SET_POWER_D0; ++ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); ++ nouveau_dp_link_train(encoder); ++ } else { ++ u8 status = DP_SET_POWER_D3; ++ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); ++ } ++ } + } + + static void +@@ -196,7 +192,8 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + uint32_t mode_ctl = 0; + int ret; + +- NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or); ++ NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n", ++ nv_encoder->or, nv_encoder->dcb->type, crtc->index); + + nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); + +@@ -239,6 +236,14 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + } + BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); + OUT_RING(evo, mode_ctl); ++ ++ nv_encoder->crtc = encoder->crtc; ++} ++ ++static struct drm_crtc * ++nv50_sor_crtc_get(struct drm_encoder *encoder) ++{ ++ return nouveau_encoder(encoder)->crtc; + } + + static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = { +@@ -249,7 +254,9 @@ static const struct drm_encoder_helper_funcs 
nv50_sor_helper_funcs = { + .prepare = nv50_sor_prepare, + .commit = nv50_sor_commit, + .mode_set = nv50_sor_mode_set, +- .detect = NULL ++ .get_crtc = nv50_sor_crtc_get, ++ .detect = NULL, ++ .disable = nv50_sor_disconnect + }; + + static void +@@ -272,32 +279,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { + }; + + int +-nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) ++nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry) + { + struct nouveau_encoder *nv_encoder = NULL; ++ struct drm_device *dev = connector->dev; + struct drm_encoder *encoder; +- bool dum; + int type; + + NV_DEBUG_KMS(dev, "\n"); + + switch (entry->type) { + case OUTPUT_TMDS: +- NV_INFO(dev, "Detected a TMDS output\n"); ++ case OUTPUT_DP: + type = DRM_MODE_ENCODER_TMDS; + break; + case OUTPUT_LVDS: +- NV_INFO(dev, "Detected a LVDS output\n"); + type = DRM_MODE_ENCODER_LVDS; +- +- if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) { +- NV_ERROR(dev, "Failed parsing LVDS table\n"); +- return -EINVAL; +- } +- break; +- case OUTPUT_DP: +- NV_INFO(dev, "Detected a DP output\n"); +- type = DRM_MODE_ENCODER_TMDS; + break; + default: + return -EINVAL; +@@ -310,8 +307,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; +- +- nv_encoder->disconnect = nv50_sor_disconnect; ++ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type); + drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs); +@@ -342,5 +338,6 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) + nv_encoder->dp.mc_unknown = 5; + } + ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + } +-- +1.7.1.1 + diff --git a/drm-radeon-fix-shared-ddc-handling.patch b/drm-radeon-fix-shared-ddc-handling.patch new file mode 100644 index 000000000..f17827cdc --- /dev/null +++ b/drm-radeon-fix-shared-ddc-handling.patch @@ 
-0,0 +1,36 @@ +From 557b452536c9390105539a264d342d963d71b087 Mon Sep 17 00:00:00 2001 +From: Alex Deucher +Date: Mon, 21 Jun 2010 12:07:52 -0400 +Subject: [PATCH] drm/radeon/kms: fix shared ddc handling +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Connectors with a shared ddc line can be connected to different +encoders. + +Reported by Pasi Kärkkäinen on dri-devel + +Signed-off-by: Alex Deucher +--- + drivers/gpu/drm/radeon/radeon_connectors.c | 4 +++- + 1 files changed, 3 insertions(+), 1 deletions(-) + +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 0c7ccc6..f58f8bd 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -785,7 +785,9 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect + if (connector == list_connector) + continue; + list_radeon_connector = to_radeon_connector(list_connector); +- if (radeon_connector->devices == list_radeon_connector->devices) { ++ if (list_radeon_connector->shared_ddc && ++ (list_radeon_connector->ddc_bus->rec.i2c_id == ++ radeon_connector->ddc_bus->rec.i2c_id)) { + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { + kfree(radeon_connector->edid); +-- +1.7.0.1 + diff --git a/drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch b/drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch new file mode 100644 index 000000000..481a08fdc --- /dev/null +++ b/drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch @@ -0,0 +1,958 @@ +From 5b904034b0ab5195d971b139d0c0b67ab21b063c Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 21 Jun 2010 20:33:16 +0100 +Subject: Revert "drm/fbdev: rework output polling to be back in the core. (v4)" + +This reverts commit eb1f8e4f3be898df808e2dfc131099f5831d491d. 
+ +Conflicts: + + drivers/gpu/drm/drm_crtc_helper.c + drivers/gpu/drm/i915/i915_dma.c + drivers/gpu/drm/i915/intel_fb.c + drivers/gpu/drm/nouveau/nouveau_fbcon.c + drivers/gpu/drm/radeon/radeon_fb.c + include/drm/drm_crtc_helper.h +--- + drivers/gpu/drm/Kconfig | 2 +- + drivers/gpu/drm/drm_crtc_helper.c | 111 ------------------------ + drivers/gpu/drm/drm_fb_helper.c | 123 +++++++++++++++++++++++---- + drivers/gpu/drm/i915/i915_dma.c | 1 - + drivers/gpu/drm/i915/i915_irq.c | 3 +- + drivers/gpu/drm/i915/intel_crt.c | 5 - + drivers/gpu/drm/i915/intel_display.c | 2 - + drivers/gpu/drm/i915/intel_dp.c | 2 - + drivers/gpu/drm/i915/intel_drv.h | 2 +- + drivers/gpu/drm/i915/intel_fb.c | 14 ++-- + drivers/gpu/drm/i915/intel_hdmi.c | 1 - + drivers/gpu/drm/i915/intel_sdvo.c | 2 - + drivers/gpu/drm/nouveau/nouveau_connector.c | 12 --- + drivers/gpu/drm/nouveau/nouveau_display.c | 1 - + drivers/gpu/drm/nouveau/nouveau_fbcon.c | 13 ++- + drivers/gpu/drm/nouveau/nouveau_fbcon.h | 2 +- + drivers/gpu/drm/nouveau/nouveau_state.c | 5 +- + drivers/gpu/drm/nouveau/nv50_display.c | 2 +- + drivers/gpu/drm/radeon/radeon_connectors.c | 13 --- + drivers/gpu/drm/radeon/radeon_display.c | 10 -- + drivers/gpu/drm/radeon/radeon_fb.c | 15 +++- + drivers/gpu/drm/radeon/radeon_irq_kms.c | 5 +- + drivers/gpu/drm/radeon/radeon_mode.h | 3 +- + include/drm/drm_crtc.h | 17 ---- + include/drm/drm_crtc_helper.h | 6 -- + include/drm/drm_fb_helper.h | 13 +++- + 26 files changed, 155 insertions(+), 230 deletions(-) + +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig +index c2711c6..a51a1e4 100644 +--- a/drivers/gpu/drm/Kconfig ++++ b/drivers/gpu/drm/Kconfig +@@ -9,7 +9,6 @@ menuconfig DRM + depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU + select I2C + select I2C_ALGOBIT +- select SLOW_WORK + help + Kernel-level support for the Direct Rendering Infrastructure (DRI) + introduced in XFree86 4.0. 
If you say Y here, you need to select +@@ -24,6 +23,7 @@ config DRM_KMS_HELPER + depends on DRM + select FB + select FRAMEBUFFER_CONSOLE if !EMBEDDED ++ select SLOW_WORK + help + FB and CRTC helpers for KMS drivers. + +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c +index 9b2a541..b142ac2 100644 +--- a/drivers/gpu/drm/drm_crtc_helper.c ++++ b/drivers/gpu/drm/drm_crtc_helper.c +@@ -807,114 +807,3 @@ int drm_helper_resume_force_mode(struct drm_device *dev) + return 0; + } + EXPORT_SYMBOL(drm_helper_resume_force_mode); +- +-static struct slow_work_ops output_poll_ops; +- +-#define DRM_OUTPUT_POLL_PERIOD (10*HZ) +-static void output_poll_execute(struct slow_work *work) +-{ +- struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); +- struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work); +- struct drm_connector *connector; +- enum drm_connector_status old_status, status; +- bool repoll = false, changed = false; +- int ret; +- +- mutex_lock(&dev->mode_config.mutex); +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- +- /* if this is HPD or polled don't check it - +- TV out for instance */ +- if (!connector->polled) +- continue; +- +- else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) +- repoll = true; +- +- old_status = connector->status; +- /* if we are connected and don't want to poll for disconnect +- skip it */ +- if (old_status == connector_status_connected && +- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && +- !(connector->polled & DRM_CONNECTOR_POLL_HPD)) +- continue; +- +- status = connector->funcs->detect(connector); +- if (old_status != status) +- changed = true; +- } +- +- mutex_unlock(&dev->mode_config.mutex); +- +- if (changed) { +- /* send a uevent + call fbdev */ +- drm_sysfs_hotplug_event(dev); +- if (dev->mode_config.funcs->output_poll_changed) +- 
dev->mode_config.funcs->output_poll_changed(dev); +- } +- +- if (repoll) { +- ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD); +- if (ret) +- DRM_ERROR("delayed enqueue failed %d\n", ret); +- } +-} +- +-void drm_kms_helper_poll_disable(struct drm_device *dev) +-{ +- if (!dev->mode_config.poll_enabled) +- return; +- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); +-} +-EXPORT_SYMBOL(drm_kms_helper_poll_disable); +- +-void drm_kms_helper_poll_enable(struct drm_device *dev) +-{ +- bool poll = false; +- struct drm_connector *connector; +- int ret; +- +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- if (connector->polled) +- poll = true; +- } +- +- if (poll) { +- ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); +- if (ret) +- DRM_ERROR("delayed enqueue failed %d\n", ret); +- } +-} +-EXPORT_SYMBOL(drm_kms_helper_poll_enable); +- +-void drm_kms_helper_poll_init(struct drm_device *dev) +-{ +- slow_work_register_user(THIS_MODULE); +- delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, +- &output_poll_ops); +- dev->mode_config.poll_enabled = true; +- +- drm_kms_helper_poll_enable(dev); +-} +-EXPORT_SYMBOL(drm_kms_helper_poll_init); +- +-void drm_kms_helper_poll_fini(struct drm_device *dev) +-{ +- drm_kms_helper_poll_disable(dev); +- slow_work_unregister_user(THIS_MODULE); +-} +-EXPORT_SYMBOL(drm_kms_helper_poll_fini); +- +-void drm_helper_hpd_irq_event(struct drm_device *dev) +-{ +- if (!dev->mode_config.poll_enabled) +- return; +- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); +- /* schedule a slow work asap */ +- delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0); +-} +-EXPORT_SYMBOL(drm_helper_hpd_irq_event); +- +-static struct slow_work_ops output_poll_ops = { +- .execute = output_poll_execute, +-}; +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c +index 08c4c92..dcc6601 
100644 +--- a/drivers/gpu/drm/drm_fb_helper.c ++++ b/drivers/gpu/drm/drm_fb_helper.c +@@ -42,6 +42,8 @@ MODULE_LICENSE("GPL and additional rights"); + + static LIST_HEAD(kernel_fb_helper_list); + ++static struct slow_work_ops output_status_change_ops; ++ + /* simple single crtc case helper function */ + int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) + { +@@ -423,13 +425,19 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) + + int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *fb_helper, +- int crtc_count, int max_conn_count) ++ int crtc_count, int max_conn_count, ++ bool polled) + { + struct drm_crtc *crtc; + int ret = 0; + int i; + + fb_helper->dev = dev; ++ fb_helper->poll_enabled = polled; ++ ++ slow_work_register_user(THIS_MODULE); ++ delayed_slow_work_init(&fb_helper->output_status_change_slow_work, ++ &output_status_change_ops); + + INIT_LIST_HEAD(&fb_helper->kernel_fb_list); + +@@ -486,6 +494,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) + + drm_fb_helper_crtc_free(fb_helper); + ++ delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work); ++ slow_work_unregister_user(THIS_MODULE); + } + EXPORT_SYMBOL(drm_fb_helper_fini); + +@@ -703,7 +713,7 @@ int drm_fb_helper_set_par(struct fb_info *info) + + if (fb_helper->delayed_hotplug) { + fb_helper->delayed_hotplug = false; +- drm_fb_helper_hotplug_event(fb_helper); ++ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0); + } + return 0; + } +@@ -816,7 +826,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, + if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { + /* hmm everyone went away - assume VGA cable just fell out + and will come back later. 
*/ +- DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); ++ DRM_ERROR("Cannot find any crtc or sizes - going 1024x768\n"); + sizes.fb_width = sizes.surface_width = 1024; + sizes.fb_height = sizes.surface_height = 768; + } +@@ -1362,7 +1372,12 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) + * we shouldn't end up with no modes here. + */ + if (count == 0) { +- printk(KERN_INFO "No connectors reported connected with modes\n"); ++ if (fb_helper->poll_enabled) { ++ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, ++ 5*HZ); ++ printk(KERN_INFO "No connectors reported connected with modes - started polling\n"); ++ } else ++ printk(KERN_INFO "No connectors reported connected with modes\n"); + } + drm_setup_crtcs(fb_helper); + +@@ -1370,16 +1385,71 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) + } + EXPORT_SYMBOL(drm_fb_helper_initial_config); + +-bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) ++/* we got a hotplug irq - need to update fbcon */ ++void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper) ++{ ++ /* if we don't have the fbdev registered yet do nothing */ ++ if (!fb_helper->fbdev) ++ return; ++ ++ /* schedule a slow work asap */ ++ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0); ++} ++EXPORT_SYMBOL(drm_helper_fb_hpd_irq_event); ++ ++bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper, bool polled) + { + int count = 0; ++ int ret; + u32 max_width, max_height, bpp_sel; +- bool bound = false, crtcs_bound = false; +- struct drm_crtc *crtc; + + if (!fb_helper->fb) + return false; ++ DRM_DEBUG_KMS("\n"); ++ ++ max_width = fb_helper->fb->width; ++ max_height = fb_helper->fb->height; ++ bpp_sel = fb_helper->fb->bits_per_pixel; ++ ++ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, ++ max_height); ++ if (fb_helper->poll_enabled && !polled) { ++ if (count) { ++ 
delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work); ++ } else { ++ ret = delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 5*HZ); ++ } ++ } ++ drm_setup_crtcs(fb_helper); ++ ++ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); ++} ++EXPORT_SYMBOL(drm_helper_fb_hotplug_event); ++ ++/* ++ * delayed work queue execution function ++ * - check if fbdev is actually in use on the gpu ++ * - if not set delayed flag and repoll if necessary ++ * - check for connector status change ++ * - repoll if 0 modes found ++ *- call driver output status changed notifier ++ */ ++static void output_status_change_execute(struct slow_work *work) ++{ ++ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); ++ struct drm_fb_helper *fb_helper = container_of(delayed_work, struct drm_fb_helper, output_status_change_slow_work); ++ struct drm_connector *connector; ++ enum drm_connector_status old_status, status; ++ bool repoll, changed = false; ++ int ret; ++ int i; ++ bool bound = false, crtcs_bound = false; ++ struct drm_crtc *crtc; + ++ repoll = fb_helper->poll_enabled; ++ ++ /* first of all check the fbcon framebuffer is actually bound to any crtc */ ++ /* take into account that no crtc at all maybe bound */ + list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { + if (crtc->fb) + crtcs_bound = true; +@@ -1387,21 +1457,38 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) + bound = true; + } + +- if (!bound && crtcs_bound) { ++ if (bound == false && crtcs_bound) { + fb_helper->delayed_hotplug = true; +- return false; ++ goto requeue; + } +- DRM_DEBUG_KMS("\n"); + +- max_width = fb_helper->fb->width; +- max_height = fb_helper->fb->height; +- bpp_sel = fb_helper->fb->bits_per_pixel; ++ for (i = 0; i < fb_helper->connector_count; i++) { ++ connector = fb_helper->connector_info[i]->connector; ++ old_status = connector->status; ++ status = connector->funcs->detect(connector); 
++ if (old_status != status) { ++ changed = true; ++ } ++ if (status == connector_status_connected && repoll) { ++ DRM_DEBUG("%s is connected - stop polling\n", drm_get_connector_name(connector)); ++ repoll = false; ++ } ++ } + +- count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, +- max_height); +- drm_setup_crtcs(fb_helper); ++ if (changed) { ++ if (fb_helper->funcs->fb_output_status_changed) ++ fb_helper->funcs->fb_output_status_changed(fb_helper); ++ } + +- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); ++requeue: ++ if (repoll) { ++ ret = delayed_slow_work_enqueue(delayed_work, 5*HZ); ++ if (ret) ++ DRM_ERROR("delayed enqueue failed %d\n", ret); ++ } + } +-EXPORT_SYMBOL(drm_fb_helper_hotplug_event); ++ ++static struct slow_work_ops output_status_change_ops = { ++ .execute = output_status_change_execute, ++}; + +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index 59a2bf8..76ace2d 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -1430,7 +1430,6 @@ static int i915_load_modeset_init(struct drm_device *dev, + if (ret) + goto cleanup_irq; + +- drm_kms_helper_poll_init(dev); + return 0; + + cleanup_irq: +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 2479be0..6350bd3 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -271,7 +271,8 @@ static void i915_hotplug_work_func(struct work_struct *work) + } + } + /* Just fire off a uevent and let userspace tell us what to do */ +- drm_helper_hpd_irq_event(dev); ++ intelfb_hotplug(dev, false); ++ drm_sysfs_hotplug_event(dev); + } + + static void i915_handle_rps_change(struct drm_device *dev) +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c +index 22ff384..125eded 100644 +--- a/drivers/gpu/drm/i915/intel_crt.c ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -584,10 +584,5 @@ void intel_crt_init(struct drm_device *dev) + + 
drm_sysfs_connector_add(connector); + +- if (I915_HAS_HOTPLUG(dev)) +- connector->polled = DRM_CONNECTOR_POLL_HPD; +- else +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; +- + dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; + } +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index d753257..70537cf 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -5036,7 +5036,6 @@ intel_user_framebuffer_create(struct drm_device *dev, + + static const struct drm_mode_config_funcs intel_mode_funcs = { + .fb_create = intel_user_framebuffer_create, +- .output_poll_changed = intel_fb_output_poll_changed, + }; + + static struct drm_gem_object * +@@ -5538,7 +5537,6 @@ void intel_modeset_cleanup(struct drm_device *dev) + + mutex_lock(&dev->struct_mutex); + +- drm_kms_helper_poll_fini(dev); + intel_fbdev_fini(dev); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c +index 49b54f0..1815df5 100644 +--- a/drivers/gpu/drm/i915/intel_dp.c ++++ b/drivers/gpu/drm/i915/intel_dp.c +@@ -1393,8 +1393,6 @@ intel_dp_init(struct drm_device *dev, int output_reg) + DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); + +- connector->polled = DRM_CONNECTOR_POLL_HPD; +- + if (output_reg == DP_A) + intel_encoder->type = INTEL_OUTPUT_EDP; + else +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h +index df931f7..3230e8d 100644 +--- a/drivers/gpu/drm/i915/intel_drv.h ++++ b/drivers/gpu/drm/i915/intel_drv.h +@@ -235,5 +235,5 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data, + extern int intel_overlay_attrs(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +-extern void intel_fb_output_poll_changed(struct drm_device *dev); ++void intelfb_hotplug(struct drm_device *dev, bool polled); + 
#endif /* __INTEL_DRV_H__ */ +diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c +index c3c5052..79098b3 100644 +--- a/drivers/gpu/drm/i915/intel_fb.c ++++ b/drivers/gpu/drm/i915/intel_fb.c +@@ -211,6 +211,12 @@ static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, + return new_fb; + } + ++void intelfb_hotplug(struct drm_device *dev, bool polled) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_helper_fb_hpd_irq_event(&dev_priv->fbdev->helper); ++} ++ + static struct drm_fb_helper_funcs intel_fb_helper_funcs = { + .gamma_set = intel_crtc_fb_gamma_set, + .gamma_get = intel_crtc_fb_gamma_get, +@@ -256,7 +262,7 @@ int intel_fbdev_init(struct drm_device *dev) + + ret = drm_fb_helper_init(dev, &ifbdev->helper, + dev_priv->num_pipe, +- INTELFB_CONN_LIMIT); ++ INTELFB_CONN_LIMIT, false); + if (ret) { + kfree(ifbdev); + return ret; +@@ -278,9 +284,3 @@ void intel_fbdev_fini(struct drm_device *dev) + dev_priv->fbdev = NULL; + } + MODULE_LICENSE("GPL and additional rights"); +- +-void intel_fb_output_poll_changed(struct drm_device *dev) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); +-} +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c +index 83bd764..acaca07 100644 +--- a/drivers/gpu/drm/i915/intel_hdmi.c ++++ b/drivers/gpu/drm/i915/intel_hdmi.c +@@ -240,7 +240,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) + + intel_encoder->type = INTEL_OUTPUT_HDMI; + +- connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c +index 76993ac..1c716b5 100644 +--- a/drivers/gpu/drm/i915/intel_sdvo.c ++++ b/drivers/gpu/drm/i915/intel_sdvo.c +@@ -2218,7 +2218,6 @@ intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int 
device) + } + + connector = &intel_connector->base; +- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + encoder->encoder_type = DRM_MODE_ENCODER_TMDS; + connector->connector_type = DRM_MODE_CONNECTOR_DVID; + +@@ -2285,7 +2284,6 @@ intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) + return false; + + connector = &intel_connector->base; +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; + sdvo_connector = intel_connector->dev_priv; +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index 149ed22..9a61f3c 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -846,7 +846,6 @@ nouveau_connector_create(struct drm_device *dev, + + switch (dcb->type) { + case DCB_CONNECTOR_VGA: +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (dev_priv->card_type >= NV_50) { + drm_connector_attach_property(connector, + dev->mode_config.scaling_mode_property, +@@ -858,17 +857,6 @@ nouveau_connector_create(struct drm_device *dev, + case DCB_CONNECTOR_TV_3: + nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; + break; +- case DCB_CONNECTOR_DP: +- case DCB_CONNECTOR_eDP: +- case DCB_CONNECTOR_HDMI_0: +- case DCB_CONNECTOR_HDMI_1: +- case DCB_CONNECTOR_DVI_I: +- case DCB_CONNECTOR_DVI_D: +- if (dev_priv->card_type >= NV_50) +- connector->polled = DRM_CONNECTOR_POLL_HPD; +- else +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; +- /* fall-through */ + default: + nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; + +diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c +index 74e6b4e..9d7928f 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_display.c ++++ b/drivers/gpu/drm/nouveau/nouveau_display.c +@@ -101,6 +101,5 @@ nouveau_user_framebuffer_create(struct drm_device *dev, + + 
const struct drm_mode_config_funcs nouveau_mode_config_funcs = { + .fb_create = nouveau_user_framebuffer_create, +- .output_poll_changed = nouveau_fbcon_output_poll_changed, + }; + +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +index c9a4a0d..0a59f96 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +@@ -326,11 +326,15 @@ nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, + return new_fb; + } + +-void +-nouveau_fbcon_output_poll_changed(struct drm_device *dev) ++void nouveau_fbcon_hotplug(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); ++ drm_helper_fb_hpd_irq_event(&dev_priv->nfbdev->helper); ++} ++ ++static void nouveau_fbcon_output_status_changed(struct drm_fb_helper *fb_helper) ++{ ++ drm_helper_fb_hotplug_event(fb_helper, true); + } + + int +@@ -370,6 +374,7 @@ static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { + .gamma_set = nouveau_fbcon_gamma_set, + .gamma_get = nouveau_fbcon_gamma_get, + .fb_probe = nouveau_fbcon_find_or_create_single, ++ .fb_output_status_changed = nouveau_fbcon_output_status_changed, + }; + + +@@ -387,7 +392,7 @@ int nouveau_fbcon_init(struct drm_device *dev) + dev_priv->nfbdev = nfbdev; + nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; + +- ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4); ++ ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4, true); + if (ret) { + kfree(nfbdev); + return ret; +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h +index e7e1268..bf8e00d 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h +@@ -58,6 +58,6 @@ void nouveau_fbcon_zfill_all(struct drm_device *dev); + void nouveau_fbcon_save_disable_accel(struct drm_device *dev); + void nouveau_fbcon_restore_accel(struct drm_device *dev); 
+ +-void nouveau_fbcon_output_poll_changed(struct drm_device *dev); ++void nouveau_fbcon_hotplug(struct drm_device *dev); + #endif /* __NV50_FBCON_H__ */ + +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +index b02a231..4dcb976 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_state.c ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -519,10 +519,8 @@ nouveau_card_init(struct drm_device *dev) + + dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; + +- if (drm_core_check_feature(dev, DRIVER_MODESET)) { ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) + nouveau_fbcon_init(dev); +- drm_kms_helper_poll_init(dev); +- } + + return 0; + +@@ -844,7 +842,6 @@ int nouveau_unload(struct drm_device *dev) + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- drm_kms_helper_poll_fini(dev); + nouveau_fbcon_fini(dev); + if (dev_priv->card_type >= NV_50) + nv50_display_destroy(dev); +diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c +index 580a5d1..e6a44af 100644 +--- a/drivers/gpu/drm/nouveau/nv50_display.c ++++ b/drivers/gpu/drm/nouveau/nv50_display.c +@@ -980,7 +980,7 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) + if (dev_priv->chipset >= 0x90) + nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); + +- drm_helper_hpd_irq_event(dev); ++ nouveau_fbcon_hotplug(dev); + } + + void +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 0c7ccc6..40a24c9 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -1085,7 +1085,6 @@ radeon_add_atom_connector(struct drm_device *dev, + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; + break; + case DRM_MODE_CONNECTOR_DVIA: + drm_connector_init(dev, &radeon_connector->base, 
&radeon_vga_connector_funcs, connector_type); +@@ -1212,12 +1211,6 @@ radeon_add_atom_connector(struct drm_device *dev, + break; + } + +- if (hpd->hpd == RADEON_HPD_NONE) { +- if (i2c_bus->valid) +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; +- } else +- connector->polled = DRM_CONNECTOR_POLL_HPD; +- + connector->display_info.subpixel_order = subpixel_order; + drm_sysfs_connector_add(connector); + return; +@@ -1279,7 +1272,6 @@ radeon_add_legacy_connector(struct drm_device *dev, + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; + break; + case DRM_MODE_CONNECTOR_DVIA: + drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); +@@ -1348,11 +1340,6 @@ radeon_add_legacy_connector(struct drm_device *dev, + break; + } + +- if (hpd->hpd == RADEON_HPD_NONE) { +- if (i2c_bus->valid) +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; +- } else +- connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->display_info.subpixel_order = subpixel_order; + drm_sysfs_connector_add(connector); + return; +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +index c73444a..ed756be 100644 +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -887,15 +887,8 @@ radeon_user_framebuffer_create(struct drm_device *dev, + return &radeon_fb->base; + } + +-static void radeon_output_poll_changed(struct drm_device *dev) +-{ +- struct radeon_device *rdev = dev->dev_private; +- radeon_fb_output_poll_changed(rdev); +-} +- + static const struct drm_mode_config_funcs radeon_mode_funcs = { + .fb_create = radeon_user_framebuffer_create, +- .output_poll_changed = radeon_output_poll_changed + }; + + struct drm_prop_enum_list { +@@ -1044,8 +1037,6 @@ int radeon_modeset_init(struct radeon_device *rdev) + radeon_pm_init(rdev); + + radeon_fbdev_init(rdev); +- 
drm_kms_helper_poll_init(rdev->ddev); +- + return 0; + } + +@@ -1058,7 +1049,6 @@ void radeon_modeset_fini(struct radeon_device *rdev) + radeon_pm_fini(rdev); + + if (rdev->mode_info.mode_config_initialized) { +- drm_kms_helper_poll_fini(rdev->ddev); + radeon_hpd_fini(rdev); + drm_mode_config_cleanup(rdev->ddev); + rdev->mode_info.mode_config_initialized = false; +diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c +index dc1634b..7dc38f6 100644 +--- a/drivers/gpu/drm/radeon/radeon_fb.c ++++ b/drivers/gpu/drm/radeon/radeon_fb.c +@@ -316,9 +316,16 @@ int radeon_parse_options(char *options) + return 0; + } + +-void radeon_fb_output_poll_changed(struct radeon_device *rdev) ++void radeonfb_hotplug(struct drm_device *dev, bool polled) + { +- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); ++ struct radeon_device *rdev = dev->dev_private; ++ ++ drm_helper_fb_hpd_irq_event(&rdev->mode_info.rfbdev->helper); ++} ++ ++static void radeon_fb_output_status_changed(struct drm_fb_helper *fb_helper) ++{ ++ drm_helper_fb_hotplug_event(fb_helper, true); + } + + static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) +@@ -357,6 +364,7 @@ static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { + .gamma_set = radeon_crtc_fb_gamma_set, + .gamma_get = radeon_crtc_fb_gamma_get, + .fb_probe = radeon_fb_find_or_create_single, ++ .fb_output_status_changed = radeon_fb_output_status_changed, + }; + + int radeon_fbdev_init(struct radeon_device *rdev) +@@ -379,7 +387,7 @@ int radeon_fbdev_init(struct radeon_device *rdev) + + ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, + rdev->num_crtc, +- RADEONFB_CONN_LIMIT); ++ RADEONFB_CONN_LIMIT, true); + if (ret) { + kfree(rfbdev); + return ret; +@@ -388,6 +396,7 @@ int radeon_fbdev_init(struct radeon_device *rdev) + drm_fb_helper_single_add_all_connectors(&rfbdev->helper); + drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); + return 0; ++ + } + + void 
radeon_fbdev_fini(struct radeon_device *rdev) +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c +index 059bfa4..b0178de 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c +@@ -26,7 +26,6 @@ + * Jerome Glisse + */ + #include "drmP.h" +-#include "drm_crtc_helper.h" + #include "radeon_drm.h" + #include "radeon_reg.h" + #include "radeon.h" +@@ -56,7 +55,9 @@ static void radeon_hotplug_work_func(struct work_struct *work) + radeon_connector_hotplug(connector); + } + /* Just fire off a uevent and let userspace tell us what to do */ +- drm_helper_hpd_irq_event(dev); ++ radeonfb_hotplug(dev, false); ++ ++ drm_sysfs_hotplug_event(dev); + } + + void radeon_driver_irq_preinstall_kms(struct drm_device *dev) +diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h +index 67358ba..fdd1611 100644 +--- a/drivers/gpu/drm/radeon/radeon_mode.h ++++ b/drivers/gpu/drm/radeon/radeon_mode.h +@@ -588,6 +588,5 @@ void radeon_fbdev_fini(struct radeon_device *rdev); + void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); + int radeon_fbdev_total_size(struct radeon_device *rdev); + bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); +- +-void radeon_fb_output_poll_changed(struct radeon_device *rdev); ++void radeonfb_hotplug(struct drm_device *dev, bool polled); + #endif +diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h +index 93a1a31..a7148d2 100644 +--- a/include/drm/drm_crtc.h ++++ b/include/drm/drm_crtc.h +@@ -31,7 +31,6 @@ + #include + + #include +-#include + + struct drm_device; + struct drm_mode_set; +@@ -461,15 +460,6 @@ enum drm_connector_force { + DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ + }; + +-/* should we poll this connector for connects and disconnects */ +-/* hot plug detectable */ +-#define DRM_CONNECTOR_POLL_HPD (1 << 0) +-/* poll for connections */ +-#define 
DRM_CONNECTOR_POLL_CONNECT (1 << 1) +-/* can cleanly poll for disconnections without flickering the screen */ +-/* DACs should rarely do this without a lot of testing */ +-#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) +- + /** + * drm_connector - central DRM connector control structure + * @crtc: CRTC this connector is currently connected to, NULL if none +@@ -514,8 +504,6 @@ struct drm_connector { + u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; + uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; + +- uint8_t polled; /* DRM_CONNECTOR_POLL_* */ +- + /* requested DPMS state */ + int dpms; + +@@ -555,7 +543,6 @@ struct drm_mode_set { + */ + struct drm_mode_config_funcs { + struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd); +- void (*output_poll_changed)(struct drm_device *dev); + }; + + struct drm_mode_group { +@@ -593,10 +580,6 @@ struct drm_mode_config { + struct drm_mode_config_funcs *funcs; + resource_size_t fb_base; + +- /* output poll support */ +- bool poll_enabled; +- struct delayed_slow_work output_poll_slow_work; +- + /* pointers to standard properties */ + struct list_head property_blob_list; + struct drm_property *edid_property; +diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h +index 1121f77..b1fa0f8 100644 +--- a/include/drm/drm_crtc_helper.h ++++ b/include/drm/drm_crtc_helper.h +@@ -127,10 +127,4 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, + } + + extern int drm_helper_resume_force_mode(struct drm_device *dev); +-extern void drm_kms_helper_poll_init(struct drm_device *dev); +-extern void drm_kms_helper_poll_fini(struct drm_device *dev); +-extern void drm_helper_hpd_irq_event(struct drm_device *dev); +- +-extern void drm_kms_helper_poll_disable(struct drm_device *dev); +-extern void drm_kms_helper_poll_enable(struct drm_device *dev); + #endif +diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h 
+index f0a6afc..9b55a94 100644 +--- a/include/drm/drm_fb_helper.h ++++ b/include/drm/drm_fb_helper.h +@@ -30,6 +30,8 @@ + #ifndef DRM_FB_HELPER_H + #define DRM_FB_HELPER_H + ++#include ++ + struct drm_fb_helper; + + struct drm_fb_helper_crtc { +@@ -69,6 +71,9 @@ struct drm_fb_helper_funcs { + + int (*fb_probe)(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); ++ ++ void (*fb_output_status_changed)(struct drm_fb_helper *helper); ++ + }; + + struct drm_fb_helper_connector { +@@ -90,6 +95,8 @@ struct drm_fb_helper { + u32 pseudo_palette[17]; + struct list_head kernel_fb_list; + ++ struct delayed_slow_work output_status_change_slow_work; ++ bool poll_enabled; + /* we got a hotplug but fbdev wasn't running the console + delay until next set_par */ + bool delayed_hotplug; +@@ -100,7 +107,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper, + + int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *helper, int crtc_count, +- int max_conn); ++ int max_conn, bool polled); + void drm_fb_helper_fini(struct drm_fb_helper *helper); + int drm_fb_helper_blank(int blank, struct fb_info *info); + int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, +@@ -123,8 +130,10 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + + int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); + +-bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); ++bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper, ++ bool polled); + bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); + int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); + ++void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper); + #endif +-- +1.7.0.1 + diff --git a/ethtool-fix-buffer-overflow.patch b/ethtool-fix-buffer-overflow.patch new file mode 100644 index 000000000..01b1a41f3 --- /dev/null +++ b/ethtool-fix-buffer-overflow.patch @@ -0,0 +1,33 @@ 
+From: Ben Hutchings +Date: Mon, 28 Jun 2010 08:44:07 +0000 (+0000) +Subject: ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet-2.6.git;a=commitdiff_plain;h=db048b69037e7fa6a7d9e95a1271a50dc08ae233 + +ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL + +On a 32-bit machine, info.rule_cnt >= 0x40000000 leads to integer +overflow and the buffer may be smaller than needed. Since +ETHTOOL_GRXCLSRLALL is unprivileged, this can presumably be used for at +least denial of service. + +Signed-off-by: Ben Hutchings +Cc: stable@kernel.org +Signed-off-by: David S. Miller +--- + +diff --git a/net/core/ethtool.c b/net/core/ethtool.c +index a0f4964..a3a7e9a 100644 +--- a/net/core/ethtool.c ++++ b/net/core/ethtool.c +@@ -347,8 +347,9 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, + + if (info.cmd == ETHTOOL_GRXCLSRLALL) { + if (info.rule_cnt > 0) { +- rule_buf = kmalloc(info.rule_cnt * sizeof(u32), +- GFP_USER); ++ if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) ++ rule_buf = kmalloc(info.rule_cnt * sizeof(u32), ++ GFP_USER); + if (!rule_buf) + return -ENOMEM; + } diff --git a/find-provides b/find-provides new file mode 100755 index 000000000..b28d1028f --- /dev/null +++ b/find-provides @@ -0,0 +1,44 @@ +#!/usr/bin/python +# +# find-provides: munge the provides dependencies from the kabideps file +# +# This software may be freely redistributed under the terms of the GNU +# General Public License (GPL). +# +# Takes a directory prefix, then outputs the kabideps file contents. 
+ +__author__ = "Jon Masters " +__version__ = "1.0" +__date__ = "Tue 25 Jul 2006 04:00 GMT" +__copyright__ = "Copyright (C) 2006 Red Hat, Inc" +__license__ = "GPL" + +import os +import re +import string +import sys + +false = 0 +true = 1 + +kabideps="" + +p = re.compile('^(.*)/symvers-(.*).gz$') +while true: + foo = sys.stdin.readline() + if foo == "": + break + string.split(foo) + m = p.match(foo) + if m: + kabideps=sys.argv[1] + "/kernel-" + m.group(2) + "-kabideps" + +if kabideps == "": + sys.exit(0) + +if not (os.path.isfile(kabideps)): + sys.stderr.write(sys.argv[0] + ": cannot locate kabideps file: " + kabideps + "\n") + sys.exit(1) + +sys.stderr.write(sys.argv[0] + ": processing kABI: " + kabideps) +os.system("cat " + kabideps) diff --git a/fix_xen_guest_on_old_EC2.patch b/fix_xen_guest_on_old_EC2.patch new file mode 100644 index 000000000..e86200295 --- /dev/null +++ b/fix_xen_guest_on_old_EC2.patch @@ -0,0 +1,34 @@ + +Legacy hypervisors (RHEL 5.0 and RHEL 5.1) do not handle guest writes to +cr4 gracefully. If a guest attempts to write a bit of cr4 that is +unsupported, then the HV is so offended it crashes the domain. While +later guest kernels (such as RHEL6) don't assume the HV supports all +features, they do expect nicer responses. That assumption introduced +code that probes whether or not xsave is supported early in the boot. So +now when attempting to boot a RHEL6 guest on RHEL5.0 or RHEL5.1 an early +crash will occur. + +This patch is quite obviously an undesirable hack. The real fix for this +problem should be in the HV, and is, in later HVs. However, to support +running on old HVs, RHEL6 can take this small change. No impact will +occur for running on any RHEL HV (not even RHEL 5.5 supports xsave). +There is only potential for guest performance loss on upstream Xen. 
+ +--- + arch/x86/xen/enlighten.c | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) + +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index 52f8e19..6db3d67 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -802,6 +802,7 @@ static void xen_write_cr4(unsigned long cr4) + { + cr4 &= ~X86_CR4_PGE; + cr4 &= ~X86_CR4_PSE; ++ cr4 &= ~X86_CR4_OSXSAVE; + + native_write_cr4(cr4); + } +-- +1.6.6.1 diff --git a/genkey b/genkey new file mode 100644 index 000000000..49c6ce8be --- /dev/null +++ b/genkey @@ -0,0 +1,7 @@ +%pubring kernel.pub +%secring kernel.sec +Key-Type: DSA +Key-Length: 512 +Name-Real: Red Hat, Inc. +Name-Comment: Kernel Module GPG key +%commit diff --git a/git-bluetooth.patch b/git-bluetooth.patch new file mode 100644 index 000000000..e69de29bb diff --git a/git-cpufreq.patch b/git-cpufreq.patch new file mode 100644 index 000000000..e69de29bb diff --git a/git-linus.diff b/git-linus.diff new file mode 100644 index 000000000..e69de29bb diff --git a/hda_intel-prealloc-4mb-dmabuffer.patch b/hda_intel-prealloc-4mb-dmabuffer.patch new file mode 100644 index 000000000..36e6aca4f --- /dev/null +++ b/hda_intel-prealloc-4mb-dmabuffer.patch @@ -0,0 +1,47 @@ +From c69fcbd1f60b0842f7c1ad2c95692ffd19c4932b Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 29 Mar 2010 23:56:08 -0400 +Subject: hda_intel-prealloc-4mb-dmabuffer + +--- + sound/pci/hda/hda_intel.c | 14 +++++++++++++- + 1 files changed, 13 insertions(+), 1 deletions(-) + +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 4bb9067..37db515 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1986,6 +1986,7 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec, + struct azx_pcm *apcm; + int pcm_dev = cpcm->device; + int s, err; ++ size_t prealloc_min = 64*1024; /* 64KB */ + + if (pcm_dev >= HDA_MAX_PCMS) { + snd_printk(KERN_ERR SFX "Invalid PCM device number %d\n", +@@ -2019,10 +2020,21 @@ 
azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec, + if (cpcm->stream[s].substreams) + snd_pcm_set_ops(pcm, s, &azx_pcm_ops); + } ++ + /* buffer pre-allocation */ ++ ++ /* subtle, don't allocate a big buffer for modems... ++ * also, don't just test 32BIT_MASK, since azx supports ++ * 64-bit DMA in some cases. ++ */ ++ /* lennart wants a 2.2MB buffer for 2sec of 48khz */ ++ if (pcm->dev_class == SNDRV_PCM_CLASS_GENERIC && ++ chip->pci->dma_mask >= DMA_32BIT_MASK) ++ prealloc_min = 4 * 1024 * 1024; /* 4MB */ ++ + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, + snd_dma_pci_data(chip->pci), +- 1024 * 64, 32 * 1024 * 1024); ++ prealloc_min, 32 * 1024 * 1024); + return 0; + } + +-- +1.7.0.1 + diff --git a/hdpvr-ir-enable.patch b/hdpvr-ir-enable.patch new file mode 100644 index 000000000..15b039e00 --- /dev/null +++ b/hdpvr-ir-enable.patch @@ -0,0 +1,216 @@ + drivers/media/video/hdpvr/Makefile | 4 +- + drivers/media/video/hdpvr/hdpvr-core.c | 12 ++--- + drivers/media/video/hdpvr/hdpvr-i2c.c | 83 ++++++++++++++++++++++---------- + drivers/media/video/hdpvr/hdpvr.h | 2 +- + 4 files changed, 64 insertions(+), 37 deletions(-) + +diff --git a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile +index e0230fc..a0b9a43 100644 +--- a/drivers/media/video/hdpvr/Makefile ++++ b/drivers/media/video/hdpvr/Makefile +@@ -1,6 +1,4 @@ +-hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o +- +-hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o ++hdpvr-objs := hdpvr-control.o hdpvr-i2c.o hdpvr-core.o hdpvr-video.o + + obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o + +diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c +index 2fc9865..c72793a 100644 +--- a/drivers/media/video/hdpvr/hdpvr-core.c ++++ b/drivers/media/video/hdpvr/hdpvr-core.c +@@ -364,9 +364,8 @@ static int hdpvr_probe(struct usb_interface *interface, + goto error; + } + +-#ifdef CONFIG_I2C +- /* until i2c is working properly */ +- retval = 0; /* 
hdpvr_register_i2c_adapter(dev); */ ++#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ++ retval = hdpvr_register_i2c_adapter(dev); + if (retval < 0) { + v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n"); + goto error; +@@ -412,12 +411,9 @@ static void hdpvr_disconnect(struct usb_interface *interface) + mutex_unlock(&dev->io_mutex); + + /* deregister I2C adapter */ +-#ifdef CONFIG_I2C ++#if defined(CONFIG_I2C) || (CONFIG_I2C_MODULE) + mutex_lock(&dev->i2c_mutex); +- if (dev->i2c_adapter) +- i2c_del_adapter(dev->i2c_adapter); +- kfree(dev->i2c_adapter); +- dev->i2c_adapter = NULL; ++ i2c_del_adapter(&dev->i2c_adapter); + mutex_unlock(&dev->i2c_mutex); + #endif /* CONFIG_I2C */ + +diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c +index 463b81b..a0557e4 100644 +--- a/drivers/media/video/hdpvr/hdpvr-i2c.c ++++ b/drivers/media/video/hdpvr/hdpvr-i2c.c +@@ -10,6 +10,8 @@ + * + */ + ++#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ++ + #include + #include + +@@ -22,8 +24,11 @@ + #define REQTYPE_I2C_WRITE 0xb0 + #define REQTYPE_I2C_WRITE_STATT 0xd0 + +-static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr, +- char *data, int len) ++#define HDPVR_HW_Z8F0811_IR_TX_I2C_ADDR 0x70 ++#define HDPVR_HW_Z8F0811_IR_RX_I2C_ADDR 0x71 ++ ++static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus, ++ unsigned char addr, char *data, int len) + { + int ret; + char *buf = kmalloc(len, GFP_KERNEL); +@@ -33,7 +38,7 @@ static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr, + ret = usb_control_msg(dev->udev, + usb_rcvctrlpipe(dev->udev, 0), + REQTYPE_I2C_READ, CTRL_READ_REQUEST, +- 0x100|addr, 0, buf, len, 1000); ++ (bus << 8) | addr, 0, buf, len, 1000); + + if (ret == len) { + memcpy(data, buf, len); +@@ -46,8 +51,8 @@ static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr, + return ret; + } + +-static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr, +- char 
*data, int len) ++static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus, ++ unsigned char addr, char *data, int len) + { + int ret; + char *buf = kmalloc(len, GFP_KERNEL); +@@ -58,7 +63,7 @@ static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr, + ret = usb_control_msg(dev->udev, + usb_sndctrlpipe(dev->udev, 0), + REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST, +- 0x100|addr, 0, buf, len, 1000); ++ (bus << 8) | addr, 0, buf, len, 1000); + + if (ret < 0) + goto error; +@@ -68,7 +73,7 @@ static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr, + REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST, + 0, 0, buf, 2, 1000); + +- if (ret == 2) ++ if ((ret == 2) && (buf[1] == (len - 1))) + ret = 0; + else if (ret >= 0) + ret = -EIO; +@@ -93,10 +98,10 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs, + addr = msgs[i].addr << 1; + + if (msgs[i].flags & I2C_M_RD) +- retval = hdpvr_i2c_read(dev, addr, msgs[i].buf, ++ retval = hdpvr_i2c_read(dev, 1, addr, msgs[i].buf, + msgs[i].len); + else +- retval = hdpvr_i2c_write(dev, addr, msgs[i].buf, ++ retval = hdpvr_i2c_write(dev, 1, addr, msgs[i].buf, + msgs[i].len); + } + +@@ -115,31 +120,59 @@ static struct i2c_algorithm hdpvr_algo = { + .functionality = hdpvr_functionality, + }; + ++static struct i2c_adapter hdpvr_i2c_adapter_template = { ++ .name = "Hauppage HD PVR I2C", ++ .owner = THIS_MODULE, ++ .id = I2C_HW_B_HDPVR, ++ .algo = &hdpvr_algo, ++ .class = I2C_CLASS_TV_ANALOG, ++}; ++ ++static struct i2c_board_info hdpvr_i2c_board_info = { ++ I2C_BOARD_INFO("ir_tx_z8f0811_haup", HDPVR_HW_Z8F0811_IR_TX_I2C_ADDR), ++ I2C_BOARD_INFO("ir_rx_z8f0811_haup", HDPVR_HW_Z8F0811_IR_RX_I2C_ADDR), ++}; ++ ++static int hdpvr_activate_ir(struct hdpvr_device *dev) ++{ ++ char buffer[8]; ++ ++ mutex_lock(&dev->i2c_mutex); ++ ++ hdpvr_i2c_read(dev, 0, 0x54, buffer, 1); ++ ++ buffer[0] = 0; ++ buffer[1] = 0x8; ++ hdpvr_i2c_write(dev, 1, 0x54, buffer, 2); ++ ++ buffer[1] = 0x18; ++ 
hdpvr_i2c_write(dev, 1, 0x54, buffer, 2); ++ ++ mutex_unlock(&dev->i2c_mutex); ++ ++ return 0; ++} ++ + int hdpvr_register_i2c_adapter(struct hdpvr_device *dev) + { +- struct i2c_adapter *i2c_adap; + int retval = -ENOMEM; + +- i2c_adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL); +- if (i2c_adap == NULL) +- goto error; ++ hdpvr_activate_ir(dev); + +- strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C", +- sizeof(i2c_adap->name)); +- i2c_adap->algo = &hdpvr_algo; +- i2c_adap->class = I2C_CLASS_TV_ANALOG; +- i2c_adap->owner = THIS_MODULE; +- i2c_adap->dev.parent = &dev->udev->dev; ++ memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template, ++ sizeof(struct i2c_adapter)); ++ dev->i2c_adapter.dev.parent = &dev->udev->dev; + +- i2c_set_adapdata(i2c_adap, dev); ++ i2c_set_adapdata(&dev->i2c_adapter, dev); + +- retval = i2c_add_adapter(i2c_adap); ++ retval = i2c_add_adapter(&dev->i2c_adapter); ++ if (retval) ++ goto error; + +- if (!retval) +- dev->i2c_adapter = i2c_adap; +- else +- kfree(i2c_adap); ++ i2c_new_device(&dev->i2c_adapter, &hdpvr_i2c_board_info); + + error: + return retval; + } ++ ++#endif /* CONFIG_I2C */ +diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h +index 49ae25d..8a5729a 100644 +--- a/drivers/media/video/hdpvr/hdpvr.h ++++ b/drivers/media/video/hdpvr/hdpvr.h +@@ -102,7 +102,7 @@ struct hdpvr_device { + struct work_struct worker; + + /* I2C adapter */ +- struct i2c_adapter *i2c_adapter; ++ struct i2c_adapter i2c_adapter; + /* I2C lock */ + struct mutex i2c_mutex; + diff --git a/i915-fix-crt-hotplug-regression.patch b/i915-fix-crt-hotplug-regression.patch new file mode 100644 index 000000000..253bf67fc --- /dev/null +++ b/i915-fix-crt-hotplug-regression.patch @@ -0,0 +1,85 @@ +From dec23057518b7035117a1a732aa48be6d34f1be8 Mon Sep 17 00:00:00 2001 +From: Andrew Lutomirski +Date: Sat, 12 Jun 2010 09:21:18 +0000 +Subject: i915: Fix CRT hotplug regression in 2.6.35-rc1 + +Commit 7a772c492fcfffae812ffca78a628e76fa57fe58 has 
two bugs which +made the hotplug problems on my laptop worse instead of better. + +First, it did not, in fact, disable the CRT plug interrupt -- it +disabled all the other hotplug interrupts. It seems rather doubtful +that that bit of the patch fixed anything, so let's just remove it. +(If you want to add it back, you probably meant ~CRT_HOTPLUG_INT_EN.) + +Second, on at least my GM45, setting CRT_HOTPLUG_ACTIVATION_PERIOD_64 +and CRT_HOTPLUG_VOLTAGE_COMPARE_50 (when they were previously unset) +causes a hotplug interrupt about three seconds later. The old code +never restored PORT_HOTPLUG_EN so this could only happen once, but +they new code restores those registers. So just set those bits when +we set up the interrupt in the first place. + +Signed-off-by: Andy Lutomirski +--- + drivers/gpu/drm/i915/i915_irq.c | 12 +++++++++++- + drivers/gpu/drm/i915/i915_reg.h | 1 - + drivers/gpu/drm/i915/intel_crt.c | 6 ------ + 3 files changed, 11 insertions(+), 8 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 2479be0..7acb1a6 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -1400,8 +1400,18 @@ int i915_driver_irq_postinstall(struct drm_device *dev) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; +- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) ++ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { + hotplug_en |= CRT_HOTPLUG_INT_EN; ++ ++ /* Programming the CRT detection parameters tends ++ to generate a spurious hotplug event about three ++ seconds later. So just do it once. 
++ */ ++ if (IS_G4X(dev)) ++ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; ++ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; ++ } ++ + /* Ignore TV since it's buggy */ + + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 64b0a3a..d390b17 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -1130,7 +1130,6 @@ + #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) + #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) + #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) +-#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ + + #define PORT_HOTPLUG_STAT 0x61114 + #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c +index 22ff384..ee0732b 100644 +--- a/drivers/gpu/drm/i915/intel_crt.c ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -234,14 +234,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) + else + tries = 1; + hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); +- hotplug_en &= CRT_HOTPLUG_MASK; + hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; + +- if (IS_G4X(dev)) +- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; +- +- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; +- + for (i = 0; i < tries ; i++) { + unsigned long timeout; + /* turn on the FORCE_DETECT */ +-- +1.7.0.1 + diff --git a/inotify-fix-inotify-oneshot-support.patch b/inotify-fix-inotify-oneshot-support.patch new file mode 100644 index 000000000..ba63e1090 --- /dev/null +++ b/inotify-fix-inotify-oneshot-support.patch @@ -0,0 +1,25 @@ +#607327 + +During the large inotify rewrite to fsnotify I completely dropped support +for IN_ONESHOT. Reimplement that support. 
+ +Signed-off-by: Eric Paris +--- + + fs/notify/inotify/inotify_fsnotify.c | 3 +++ + 1 files changed, 3 insertions(+), 0 deletions(-) + +diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c +index daa666a..388a150 100644 +--- a/fs/notify/inotify/inotify_fsnotify.c ++++ b/fs/notify/inotify/inotify_fsnotify.c +@@ -126,6 +126,9 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev + ret = 0; + } + ++ if (entry->mask & IN_ONESHOT) ++ fsnotify_destroy_mark_by_entry(entry); ++ + /* + * If we hold the entry until after the event is on the queue + * IN_IGNORED won't be able to pass this event in the queue diff --git a/inotify-send-IN_UNMOUNT-events.patch b/inotify-send-IN_UNMOUNT-events.patch new file mode 100644 index 000000000..cf1d4c4bf --- /dev/null +++ b/inotify-send-IN_UNMOUNT-events.patch @@ -0,0 +1,29 @@ +#607327 ? + +Since the .31 or so notify rewrite inotify has not sent events about +inodes which are unmounted. This patch restores those events. 
+ +Signed-off-by: Eric Paris +--- + + fs/notify/inotify/inotify_user.c | 7 +++++-- + 1 files changed, 5 insertions(+), 2 deletions(-) + +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c +index 44aeb0f..f381daf 100644 +--- a/fs/notify/inotify/inotify_user.c ++++ b/fs/notify/inotify/inotify_user.c +@@ -90,8 +90,11 @@ static inline __u32 inotify_arg_to_mask(u32 arg) + { + __u32 mask; + +- /* everything should accept their own ignored and cares about children */ +- mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD); ++ /* ++ * everything should accept their own ignored, cares about children, ++ * and should receive events when the inode is unmounted ++ */ ++ mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT); + + /* mask off the flags used to open the fd */ + mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT)); diff --git a/input-synaptics-relax-capability-id-checks-on-new-hardware.patch b/input-synaptics-relax-capability-id-checks-on-new-hardware.patch new file mode 100644 index 000000000..957478dcd --- /dev/null +++ b/input-synaptics-relax-capability-id-checks-on-new-hardware.patch @@ -0,0 +1,56 @@ +From: Dmitry Torokhov +Date: Wed, 21 Jul 2010 07:01:19 +0000 (-0700) +Subject: Input: synaptics - relax capability ID checks on newer hardware +X-Git-Tag: v2.6.35-rc6~1^2 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3619b8fead04ab9de643712e757ef6b5f79fd1ab + +Input: synaptics - relax capability ID checks on newer hardware + +Older firmwares fixed the middle byte of the Synaptics capabilities +query to 0x47, but starting with firmware 7.5 the middle byte +represents submodel ID, sometimes also called "dash number". 
+ +Reported-and-tested-by: Miroslav Å ulc +Signed-off-by: Dmitry Torokhov +--- + +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index 9ba9c4a..705589d 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -141,8 +141,13 @@ static int synaptics_capability(struct psmouse *psmouse) + priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2]; + priv->ext_cap = priv->ext_cap_0c = 0; + +- if (!SYN_CAP_VALID(priv->capabilities)) ++ /* ++ * Older firmwares had submodel ID fixed to 0x47 ++ */ ++ if (SYN_ID_FULL(priv->identity) < 0x705 && ++ SYN_CAP_SUBMODEL_ID(priv->capabilities) != 0x47) { + return -1; ++ } + + /* + * Unless capExtended is set the rest of the flags should be ignored +diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h +index 7d4d5e1..b6aa7d2 100644 +--- a/drivers/input/mouse/synaptics.h ++++ b/drivers/input/mouse/synaptics.h +@@ -47,7 +47,7 @@ + #define SYN_CAP_FOUR_BUTTON(c) ((c) & (1 << 3)) + #define SYN_CAP_MULTIFINGER(c) ((c) & (1 << 1)) + #define SYN_CAP_PALMDETECT(c) ((c) & (1 << 0)) +-#define SYN_CAP_VALID(c) ((((c) & 0x00ff00) >> 8) == 0x47) ++#define SYN_CAP_SUBMODEL_ID(c) (((c) & 0x00ff00) >> 8) + #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) + #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) + #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) +@@ -66,6 +66,7 @@ + #define SYN_ID_MODEL(i) (((i) >> 4) & 0x0f) + #define SYN_ID_MAJOR(i) ((i) & 0x0f) + #define SYN_ID_MINOR(i) (((i) >> 16) & 0xff) ++#define SYN_ID_FULL(i) ((SYN_ID_MAJOR(i) << 8) | SYN_ID_MINOR(i)) + #define SYN_ID_IS_SYNAPTICS(i) ((((i) >> 8) & 0xff) == 0x47) + + /* synaptics special commands */ diff --git a/iwlwifi-Recover-TX-flow-failure.patch b/iwlwifi-Recover-TX-flow-failure.patch new file mode 100644 index 000000000..4d0cd5bbb --- /dev/null +++ b/iwlwifi-Recover-TX-flow-failure.patch @@ -0,0 +1,162 @@ +From 5b51e801eef53be8e521316eea9e78e5c4595fd4 Mon 
Sep 17 00:00:00 2001 +From: Wey-Yi Guy +Date: Thu, 4 Mar 2010 13:38:59 -0800 +Subject: [PATCH] iwlwifi: Recover TX flow failure + +Monitors the tx statistics to detect the drop in throughput. +When the throughput drops, the ratio of the actual_ack_count and the +expected_ack_count also drops. At the same time, the aggregated +ba_timeout (the number of ba timeout retries) also rises. If the +actual_ack_count/expected_ack_count ratio is 0 and the number of ba +timeout retries rises to BA_TIMEOUT_MAX, no tx packets can be delivered. +Reloading the uCode and bring the system back to normal operational +state. + +Signed-off-by: Trieu 'Andrew' Nguyen +Signed-off-by: Wey-Yi Guy +Signed-off-by: Reinette Chatre +--- + drivers/net/wireless/iwlwifi/iwl-agn.c | 14 ++++++++- + drivers/net/wireless/iwlwifi/iwl-dev.h | 21 +++++++++++++ + drivers/net/wireless/iwlwifi/iwl-rx.c | 50 +++++++++++++++++++++++++++++++- + 3 files changed, 83 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c +index 07a9a02..dc751cb 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c +@@ -2965,10 +2965,21 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, + return ret; + case IEEE80211_AMPDU_TX_START: + IWL_DEBUG_HT(priv, "start Tx\n"); +- return iwl_tx_agg_start(priv, sta->addr, tid, ssn); ++ ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn); ++ if (ret == 0) { ++ priv->_agn.agg_tids_count++; ++ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", ++ priv->_agn.agg_tids_count); ++ } ++ return ret; + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwl_tx_agg_stop(priv, sta->addr, tid); ++ if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) { ++ priv->_agn.agg_tids_count--; ++ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", ++ priv->_agn.agg_tids_count); ++ } + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return 0; + else +@@ -3399,6 
+3410,7 @@ static int iwl_init_drv(struct iwl_priv *priv) + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; ++ priv->_agn.agg_tids_count = 0; + + /* initialize force reset */ + priv->force_reset[IWL_RF_RESET].reset_duration = +diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h +index 447e14b..e2a6b76 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-dev.h ++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h +@@ -1275,6 +1275,26 @@ struct iwl_priv { + void *shared_virt; + dma_addr_t shared_phys; + /*End*/ ++ union { ++#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE) ++ struct { ++ /* INT ICT Table */ ++ __le32 *ict_tbl; ++ void *ict_tbl_vir; ++ dma_addr_t ict_tbl_dma; ++ dma_addr_t aligned_ict_tbl_dma; ++ int ict_index; ++ u32 inta; ++ bool use_ict; ++ /* ++ * reporting the number of tids has AGG on. 0 means ++ * no AGGREGATION ++ */ ++ u8 agg_tids_count; ++ } _agn; ++#endif ++ }; ++ + struct iwl_hw_params hw_params; + + /* INT ICT Table */ +diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c +index fabc52f..f48d685 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-rx.c ++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c +@@ -617,9 +617,18 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, + + #define REG_RECALIB_PERIOD (60) + ++/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ ++#define ACK_CNT_RATIO (50) ++#define BA_TIMEOUT_CNT (5) ++#define BA_TIMEOUT_MAX (16) ++ + #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" + /* +- * This function checks for plcp error. ++ * This function checks for plcp error, ACK count ratios, aggregated BA ++ * timeout retries. 
++ * - When the ACK count ratio is 0 and aggregated BA timeout retries is ++ * exceeding the BA_TIMEOUT_MAX, it will recover the failure by resetting ++ * the firmware. + * - When the plcp error is exceeding the thresholds, it will reset the radio + * to improve the throughput. + */ +@@ -629,6 +638,45 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, + int combined_plcp_delta; + unsigned int plcp_msec; + unsigned long plcp_received_jiffies; ++ int actual_ack_cnt_delta; ++ int expected_ack_cnt_delta; ++ int ba_timeout_delta; ++ ++ actual_ack_cnt_delta = ++ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - ++ le32_to_cpu(priv->statistics.tx.actual_ack_cnt); ++ expected_ack_cnt_delta = ++ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - ++ le32_to_cpu(priv->statistics.tx.expected_ack_cnt); ++ ba_timeout_delta = ++ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - ++ le32_to_cpu(priv->statistics.tx.agg.ba_timeout); ++ if ((priv->_agn.agg_tids_count > 0) && ++ (expected_ack_cnt_delta > 0) && ++ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) ++ < ACK_CNT_RATIO) && ++ (ba_timeout_delta > BA_TIMEOUT_CNT)) { ++ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," ++ " expected_ack_cnt = %d\n", ++ actual_ack_cnt_delta, expected_ack_cnt_delta); ++ ++#ifdef CONFIG_IWLWIFI_DEBUG ++ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", ++ priv->delta_statistics.tx.rx_detected_cnt); ++ IWL_DEBUG_RADIO(priv, ++ "ack_or_ba_timeout_collision delta = %d\n", ++ priv->delta_statistics.tx. 
++ ack_or_ba_timeout_collision); ++#endif ++ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", ++ ba_timeout_delta); ++ if ((actual_ack_cnt_delta == 0) && ++ (ba_timeout_delta >= BA_TIMEOUT_MAX)) { ++ IWL_DEBUG_RADIO(priv, ++ "call iwl_force_reset(IWL_FW_RESET)\n"); ++ iwl_force_reset(priv, IWL_FW_RESET); ++ } ++ } + + /* + * check for plcp_err and trigger radio reset if it exceeds +-- +1.7.0.1 + diff --git a/iwlwifi-Recover-TX-flow-stall-due-to-stuck-queue.patch b/iwlwifi-Recover-TX-flow-stall-due-to-stuck-queue.patch new file mode 100644 index 000000000..f2e0241c9 --- /dev/null +++ b/iwlwifi-Recover-TX-flow-stall-due-to-stuck-queue.patch @@ -0,0 +1,504 @@ +From 8483e011e2ad5d5b27ebe4517fedf13e16d0b7cb Mon Sep 17 00:00:00 2001 +From: Wey-Yi Guy +Date: Mon, 1 Mar 2010 17:23:50 -0800 +Subject: [PATCH] iwlwifi: Recover TX flow stall due to stuck queue + +Monitors the internal TX queues periodically. When a queue is stuck +for some unknown conditions causing the throughput to drop and the +transfer is stop, the driver will force firmware reload and bring the +system back to normal operational state. + +The iwlwifi devices behave differently in this regard so this feature is +made part of the ops infrastructure so we can have more control on how to +monitor and recover from tx queue stall case per device. 
+ +Signed-off-by: Trieu 'Andrew' Nguyen +Signed-off-by: Wey-Yi Guy +Signed-off-by: Reinette Chatre +--- + drivers/net/wireless/iwlwifi/iwl-1000.c | 3 + + drivers/net/wireless/iwlwifi/iwl-3945.c | 2 + + drivers/net/wireless/iwlwifi/iwl-4965.c | 1 + + drivers/net/wireless/iwlwifi/iwl-5000.c | 9 +++ + drivers/net/wireless/iwlwifi/iwl-6000.c | 8 ++ + drivers/net/wireless/iwlwifi/iwl-agn.c | 16 +++++ + drivers/net/wireless/iwlwifi/iwl-core.c | 93 +++++++++++++++++++++++++++ + drivers/net/wireless/iwlwifi/iwl-core.h | 7 ++ + drivers/net/wireless/iwlwifi/iwl-dev.h | 10 +++ + drivers/net/wireless/iwlwifi/iwl-tx.c | 2 + + drivers/net/wireless/iwlwifi/iwl3945-base.c | 16 +++++ + 11 files changed, 167 insertions(+), 0 deletions(-) + +diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c +index 3bf2e6e..89dc401 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-1000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-1000.c +@@ -211,6 +211,7 @@ static struct iwl_lib_ops iwl1000_lib = { + .set_ct_kill = iwl1000_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static const struct iwl_ops iwl1000_ops = { +@@ -248,6 +249,7 @@ struct iwl_cfg iwl1000_bgn_cfg = { + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl1000_bg_cfg = { +@@ -276,6 +278,7 @@ struct iwl_cfg iwl1000_bg_cfg = { + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c +index 0728054..caebec4 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c ++++ 
b/drivers/net/wireless/iwlwifi/iwl-3945.c +@@ -2827,6 +2827,7 @@ static struct iwl_cfg iwl3945_bg_cfg = { + .led_compensation = 64, + .broken_powersave = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + static struct iwl_cfg iwl3945_abg_cfg = { +@@ -2845,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = { + .led_compensation = 64, + .broken_powersave = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c +index 8972166..aa49a6e 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-4965.c ++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c +@@ -2251,6 +2251,7 @@ struct iwl_cfg iwl4965_agn_cfg = { + .led_compensation = 61, + .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + /* Module firmware */ +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c +index e476acb..d05fad4 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-5000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c +@@ -1500,6 +1500,7 @@ struct iwl_lib_ops iwl5000_lib = { + .set_ct_kill = iwl5000_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static struct iwl_lib_ops iwl5150_lib = { +@@ -1554,6 +1555,7 @@ static struct iwl_lib_ops iwl5150_lib = { + .set_ct_kill = iwl5150_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static const struct iwl_ops iwl5000_ops = { +@@ -1603,6 +1605,7 @@ struct iwl_cfg iwl5300_agn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = 
IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5100_bgn_cfg = { +@@ -1629,6 +1632,7 @@ struct iwl_cfg iwl5100_bgn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5100_abg_cfg = { +@@ -1653,6 +1657,7 @@ struct iwl_cfg iwl5100_abg_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5100_agn_cfg = { +@@ -1679,6 +1684,7 @@ struct iwl_cfg iwl5100_agn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5350_agn_cfg = { +@@ -1705,6 +1711,7 @@ struct iwl_cfg iwl5350_agn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5150_agn_cfg = { +@@ -1731,6 +1738,7 @@ struct iwl_cfg iwl5150_agn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5150_abg_cfg = { +@@ -1755,6 +1763,7 @@ struct iwl_cfg iwl5150_abg_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); +diff --git 
a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c +index 92b3e64..0c965cd 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-6000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c +@@ -277,6 +277,7 @@ static struct iwl_lib_ops iwl6000_lib = { + .set_ct_kill = iwl6000_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static const struct iwl_ops iwl6000_ops = { +@@ -342,6 +343,7 @@ static struct iwl_lib_ops iwl6050_lib = { + .set_calib_version = iwl6050_set_calib_version, + }, + .add_bcast_station = iwl_add_bcast_station, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static const struct iwl_ops iwl6050_ops = { +@@ -385,6 +387,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = { + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6000i_2abg_cfg = { +@@ -416,6 +419,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = { + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6000i_2bg_cfg = { +@@ -447,6 +451,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = { + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6050_2agn_cfg = { +@@ -479,6 +484,7 @@ struct iwl_cfg iwl6050_2agn_cfg = { + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1500, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6050_2abg_cfg = { +@@ -510,6 +516,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + 
.chain_noise_scale = 1500, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6000_3agn_cfg = { +@@ -542,6 +549,7 @@ struct iwl_cfg iwl6000_3agn_cfg = { + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c +index bdff565..07a9a02 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c +@@ -2106,6 +2106,13 @@ static void iwl_alive_start(struct iwl_priv *priv) + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(STATUS_ALIVE, &priv->status); + ++ if (priv->cfg->ops->lib->recover_from_tx_stall) { ++ /* Enable timer to monitor the driver queues */ ++ mod_timer(&priv->monitor_recover, ++ jiffies + ++ msecs_to_jiffies(priv->cfg->monitor_recover_period)); ++ } ++ + if (iwl_is_rfkill(priv)) + return; + +@@ -3316,6 +3323,13 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) + priv->ucode_trace.data = (unsigned long)priv; + priv->ucode_trace.function = iwl_bg_ucode_trace; + ++ if (priv->cfg->ops->lib->recover_from_tx_stall) { ++ init_timer(&priv->monitor_recover); ++ priv->monitor_recover.data = (unsigned long)priv; ++ priv->monitor_recover.function = ++ priv->cfg->ops->lib->recover_from_tx_stall; ++ } ++ + if (!priv->cfg->use_isr_legacy) + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); +@@ -3336,6 +3350,8 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) + cancel_work_sync(&priv->beacon_update); + del_timer_sync(&priv->statistics_periodic); + del_timer_sync(&priv->ucode_trace); ++ if (priv->cfg->ops->lib->recover_from_tx_stall) ++ del_timer_sync(&priv->monitor_recover); + } + + static void iwl_init_hw_rates(struct 
iwl_priv *priv, +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c +index 049b652..a5a2de6 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-core.c ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c +@@ -3403,6 +3403,99 @@ int iwl_force_reset(struct iwl_priv *priv, int mode) + } + return 0; + } ++EXPORT_SYMBOL(iwl_force_reset); ++ ++/** ++ * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover ++ * ++ * During normal condition (no queue is stuck), the timer is continually set to ++ * execute every monitor_recover_period milliseconds after the last timer ++ * expired. When the queue read_ptr is at the same place, the timer is ++ * shorten to 100mSecs. This is ++ * 1) to reduce the chance that the read_ptr may wrap around (not stuck) ++ * 2) to detect the stuck queues quicker before the station and AP can ++ * disassociate each other. ++ * ++ * This function monitors all the tx queues and recover from it if any ++ * of the queues are stuck. ++ * 1. It first check the cmd queue for stuck conditions. If it is stuck, ++ * it will recover by resetting the firmware and return. ++ * 2. Then, it checks for station association. If it associates it will check ++ * other queues. If any queue is stuck, it will recover by resetting ++ * the firmware. ++ * Note: It the number of times the queue read_ptr to be at the same place to ++ * be MAX_REPEAT+1 in order to consider to be stuck. ++ */ ++/* ++ * The maximum number of times the read pointer of the tx queue at the ++ * same place without considering to be stuck. 
++ */ ++#define MAX_REPEAT (2) ++static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt) ++{ ++ struct iwl_tx_queue *txq; ++ struct iwl_queue *q; ++ ++ txq = &priv->txq[cnt]; ++ q = &txq->q; ++ /* queue is empty, skip */ ++ if (q->read_ptr != q->write_ptr) { ++ if (q->read_ptr == q->last_read_ptr) { ++ /* a queue has not been read from last time */ ++ if (q->repeat_same_read_ptr > MAX_REPEAT) { ++ IWL_ERR(priv, ++ "queue %d stuck %d time. Fw reload.\n", ++ q->id, q->repeat_same_read_ptr); ++ q->repeat_same_read_ptr = 0; ++ iwl_force_reset(priv, IWL_FW_RESET); ++ } else { ++ q->repeat_same_read_ptr++; ++ IWL_DEBUG_RADIO(priv, ++ "queue %d, not read %d time\n", ++ q->id, ++ q->repeat_same_read_ptr); ++ mod_timer(&priv->monitor_recover, jiffies + ++ msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS)); ++ } ++ return 1; ++ } else { ++ q->last_read_ptr = q->read_ptr; ++ q->repeat_same_read_ptr = 0; ++ } ++ } ++ return 0; ++} ++ ++void iwl_bg_monitor_recover(unsigned long data) ++{ ++ struct iwl_priv *priv = (struct iwl_priv *)data; ++ int cnt; ++ ++ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ++ return; ++ ++ /* monitor and check for stuck cmd queue */ ++ if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM)) ++ return; ++ ++ /* monitor and check for other stuck queues */ ++ if (iwl_is_associated(priv)) { ++ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { ++ /* skip as we already checked the command queue */ ++ if (cnt == IWL_CMD_QUEUE_NUM) ++ continue; ++ if (iwl_check_stuck_queue(priv, cnt)) ++ return; ++ } ++ } ++ /* ++ * Reschedule the timer to occur in ++ * priv->cfg->monitor_recover_period ++ */ ++ mod_timer(&priv->monitor_recover, ++ jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period)); ++} ++EXPORT_SYMBOL(iwl_bg_monitor_recover); + + #ifdef CONFIG_PM + +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h +index 36940a9..9076576 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-core.h ++++ 
b/drivers/net/wireless/iwlwifi/iwl-core.h +@@ -191,6 +191,8 @@ struct iwl_lib_ops { + struct iwl_temp_ops temp_ops; + /* station management */ + void (*add_bcast_station)(struct iwl_priv *priv); ++ /* recover from tx queue stall */ ++ void (*recover_from_tx_stall)(unsigned long data); + }; + + struct iwl_led_ops { +@@ -295,6 +297,8 @@ struct iwl_cfg { + const bool support_wimax_coexist; + u8 plcp_delta_threshold; + s32 chain_noise_scale; ++ /* timer period for monitor the driver queues */ ++ u32 monitor_recover_period; + }; + + /*************************** +@@ -577,6 +581,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) + pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + return pci_lnk_ctl; + } ++ ++void iwl_bg_monitor_recover(unsigned long data); ++ + #ifdef CONFIG_PM + int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); + int iwl_pci_resume(struct pci_dev *pdev); +diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h +index ef1720a..447e14b 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-dev.h ++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h +@@ -183,6 +183,10 @@ struct iwl_queue { + int n_bd; /* number of BDs in this queue */ + int write_ptr; /* 1-st empty entry (index) host_w*/ + int read_ptr; /* last used entry (index) host_r*/ ++ /* use for monitoring and recovering the stuck queue */ ++ int last_read_ptr; /* storing the last read_ptr */ ++ /* number of time read_ptr and last_read_ptr are the same */ ++ u8 repeat_same_read_ptr; + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; +@@ -1039,6 +1043,11 @@ struct iwl_event_log { + #define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) + #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) + ++/* timer constants use to monitor and recover stuck tx queues in mSecs */ ++#define IWL_MONITORING_PERIOD (1000) ++#define IWL_ONE_HUNDRED_MSECS (100) ++#define IWL_SIXTY_SECS (60000) ++ + enum iwl_reset { + 
IWL_RF_RESET = 0, + IWL_FW_RESET, +@@ -1339,6 +1348,7 @@ struct iwl_priv { + struct work_struct run_time_calib_work; + struct timer_list statistics_periodic; + struct timer_list ucode_trace; ++ struct timer_list monitor_recover; + bool hw_ready; + /*For 3945*/ + #define IWL_DEFAULT_TX_POWER 0x0F +diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c +index 8dd0c03..6af23f2 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-tx.c ++++ b/drivers/net/wireless/iwlwifi/iwl-tx.c +@@ -310,6 +310,8 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + q->high_mark = 2; + + q->write_ptr = q->read_ptr = 0; ++ q->last_read_ptr = 0; ++ q->repeat_same_read_ptr = 0; + + return 0; + } +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c +index 19c77a8..853511e 100644 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c +@@ -2513,6 +2513,13 @@ static void iwl3945_alive_start(struct iwl_priv *priv) + /* After the ALIVE response, we can send commands to 3945 uCode */ + set_bit(STATUS_ALIVE, &priv->status); + ++ if (priv->cfg->ops->lib->recover_from_tx_stall) { ++ /* Enable timer to monitor the driver queues */ ++ mod_timer(&priv->monitor_recover, ++ jiffies + ++ msecs_to_jiffies(priv->cfg->monitor_recover_period)); ++ } ++ + if (iwl_is_rfkill(priv)) + return; + +@@ -3789,6 +3796,13 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv) + + iwl3945_hw_setup_deferred_work(priv); + ++ if (priv->cfg->ops->lib->recover_from_tx_stall) { ++ init_timer(&priv->monitor_recover); ++ priv->monitor_recover.data = (unsigned long)priv; ++ priv->monitor_recover.function = ++ priv->cfg->ops->lib->recover_from_tx_stall; ++ } ++ + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl3945_irq_tasklet, (unsigned long)priv); + } +@@ -3801,6 +3815,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv) + 
cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->beacon_update); ++ if (priv->cfg->ops->lib->recover_from_tx_stall) ++ del_timer_sync(&priv->monitor_recover); + } + + static struct attribute *iwl3945_sysfs_entries[] = { +-- +1.7.0.1 + diff --git a/iwlwifi-add-internal-short-scan-support-for-3945.patch b/iwlwifi-add-internal-short-scan-support-for-3945.patch new file mode 100644 index 000000000..6a0d54a2c --- /dev/null +++ b/iwlwifi-add-internal-short-scan-support-for-3945.patch @@ -0,0 +1,90 @@ +From dcde3533b9f501ad079c297b3bf7659739c4c287 Mon Sep 17 00:00:00 2001 +From: Wey-Yi Guy +Date: Wed, 24 Feb 2010 08:28:30 -0800 +Subject: [PATCH] iwlwifi: add internal short scan support for 3945 + +Add internal short scan support for 3945 NIC, This allows 3945 NIC +to support radio reset request like the other series of NICs. + +Signed-off-by: Wey-Yi Guy +Signed-off-by: Reinette Chatre +--- + drivers/net/wireless/iwlwifi/iwl3945-base.c | 22 ++++++++++++++-------- + 1 files changed, 14 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c +index b74a56c..19c77a8 100644 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c +@@ -2821,7 +2821,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + .len = sizeof(struct iwl3945_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; +- int rc = 0; + struct iwl3945_scan_cmd *scan; + struct ieee80211_conf *conf = NULL; + u8 n_probes = 0; +@@ -2849,7 +2848,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests " + "Ignoring second request.\n"); +- rc = -EIO; + goto done; + } + +@@ -2884,7 +2882,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) + + 
IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan) { +- rc = -ENOMEM; ++ IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); + goto done; + } + } +@@ -2927,7 +2925,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + scan_suspend_time, interval); + } + +- if (priv->scan_request->n_ssids) { ++ if (priv->is_internal_short_scan) { ++ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); ++ } else if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { +@@ -2975,13 +2975,20 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + goto done; + } + +- scan->tx_cmd.len = cpu_to_le16( ++ if (!priv->is_internal_short_scan) { ++ scan->tx_cmd.len = cpu_to_le16( + iwl_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan))); +- ++ } else { ++ scan->tx_cmd.len = cpu_to_le16( ++ iwl_fill_probe_req(priv, ++ (struct ieee80211_mgmt *)scan->data, ++ NULL, 0, ++ IWL_MAX_SCAN_SIZE - sizeof(*scan))); ++ } + /* select Rx antennas */ + scan->flags |= iwl3945_get_antenna_flags(priv); + +@@ -3003,8 +3010,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); +- rc = iwl_send_cmd_sync(priv, &cmd); +- if (rc) ++ if (iwl_send_cmd_sync(priv, &cmd)) + goto done; + + queue_delayed_work(priv->workqueue, &priv->scan_check, +-- +1.7.0.1 + diff --git a/iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch b/iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch new file mode 100644 index 000000000..465d2ac73 --- /dev/null +++ b/iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch @@ -0,0 +1,58 @@ +commit a69b03e941abae00380fc6bc1877fb797a1b31e6 +Author: John W. 
Linville +Date: Mon Jun 14 14:30:25 2010 -0400 + + iwlwifi: cancel scan watchdog in iwl_bg_abort_scan + + Avoids this: + + WARNING: at net/mac80211/scan.c:312 ieee80211_scan_completed+0x5f/0x1f1 + [mac80211]() + Hardware name: Latitude E5400 + Modules linked in: aes_x86_64 aes_generic fuse ipt_MASQUERADE iptable_nat + nf_nat rfcomm sco bridge stp llc bnep l2cap sunrpc cpufreq_ondemand + acpi_cpufreq freq_table xt_physdev ip6t_REJECT nf_conntrack_ipv6 + ip6table_filter ip6_tables ipv6 kvm_intel kvm uinput arc4 ecb + snd_hda_codec_intelhdmi snd_hda_codec_idt snd_hda_intel iwlagn snd_hda_codec + snd_hwdep snd_seq snd_seq_device iwlcore snd_pcm dell_wmi sdhci_pci sdhci + iTCO_wdt tg3 dell_laptop mmc_core i2c_i801 wmi mac80211 snd_timer + iTCO_vendor_support btusb joydev dcdbas cfg80211 bluetooth snd soundcore + microcode rfkill snd_page_alloc firewire_ohci firewire_core crc_itu_t + yenta_socket rsrc_nonstatic i915 drm_kms_helper drm i2c_algo_bit i2c_core video + output [last unloaded: scsi_wait_scan] + Pid: 979, comm: iwlagn Tainted: G W 2.6.33.3-85.fc13.x86_64 #1 + Call Trace: + [] warn_slowpath_common+0x77/0x8f + [] warn_slowpath_null+0xf/0x11 + [] ieee80211_scan_completed+0x5f/0x1f1 [mac80211] + [] iwl_bg_scan_completed+0xbb/0x17a [iwlcore] + [] worker_thread+0x1a4/0x232 + [] ? iwl_bg_scan_completed+0x0/0x17a [iwlcore] + [] ? autoremove_wake_function+0x0/0x34 + [] ? worker_thread+0x0/0x232 + [] kthread+0x7a/0x82 + [] kernel_thread_helper+0x4/0x10 + [] ? kthread+0x0/0x82 + [] ? kernel_thread_helper+0x0/0x10 + + Reported here: + + https://bugzilla.redhat.com/show_bug.cgi?id=590436 + + Signed-off-by: John W. 
Linville + Reported-by: Mihai Harpau + Cc: stable@kernel.org + Acked-by: Reinette Chatre + +diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c +index 5d3f51f..386c5f9 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-scan.c ++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c +@@ -491,6 +491,7 @@ void iwl_bg_abort_scan(struct work_struct *work) + + mutex_lock(&priv->mutex); + ++ cancel_delayed_work_sync(&priv->scan_check); + set_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_send_scan_abort(priv); + diff --git a/iwlwifi-code-cleanup-for-connectivity-recovery.patch b/iwlwifi-code-cleanup-for-connectivity-recovery.patch new file mode 100644 index 000000000..4a3ab5ca8 --- /dev/null +++ b/iwlwifi-code-cleanup-for-connectivity-recovery.patch @@ -0,0 +1,278 @@ +From 56cf16e34b896ac40c6707eb053d45d2cab18bbd Mon Sep 17 00:00:00 2001 +From: Wey-Yi Guy +Date: Fri, 5 Mar 2010 14:22:46 -0800 +Subject: [PATCH] iwlwifi: code cleanup for connectivity recovery + +Split the connectivity check and recovery routine into separated +functions based on the types + 1. iwl_good_ack_health() - check for ack count + 2. iwl_good_plcp_health() - check for plcp error + +Based on the type of errors being detected, different recovery methods +will be used to bring the system back to normal operational state. + +Because different NIC has different HW and uCode, the behavior is also +different; these functions thus now form part of the ops infrastructure, +so we can have more control on how to monitor and recover from error condition +case per device. 
+ +Signed-off-by: Wey-Yi Guy +Signed-off-by: Reinette Chatre +--- + drivers/net/wireless/iwlwifi/iwl-1000.c | 3 +- + drivers/net/wireless/iwlwifi/iwl-4965.c | 2 +- + drivers/net/wireless/iwlwifi/iwl-5000.c | 6 +- + drivers/net/wireless/iwlwifi/iwl-6000.c | 6 +- + drivers/net/wireless/iwlwifi/iwl-core.h | 11 +++- + drivers/net/wireless/iwlwifi/iwl-rx.c | 97 +++++++++++++++++++++---------- + 6 files changed, 85 insertions(+), 40 deletions(-) + +diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c +index 2597574..7087631 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-1000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-1000.c +@@ -212,7 +212,8 @@ static struct iwl_lib_ops iwl1000_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static const struct iwl_ops iwl1000_ops = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c +index 6dd4328..dcca310 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-4965.c ++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c +@@ -2217,7 +2217,7 @@ static struct iwl_lib_ops iwl4965_lib = { + .set_ct_kill = iwl4965_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, + }; + + static const struct iwl_ops iwl4965_ops = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c +index 0c2469c..8e0dd13 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-5000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c +@@ -1501,7 +1501,8 @@ struct iwl_lib_ops iwl5000_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = 
iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static struct iwl_lib_ops iwl5150_lib = { +@@ -1557,7 +1558,8 @@ static struct iwl_lib_ops iwl5150_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static const struct iwl_ops iwl5000_ops = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c +index 189a8ce..1d4fea1 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-6000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c +@@ -278,7 +278,8 @@ static struct iwl_lib_ops iwl6000_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static const struct iwl_ops iwl6000_ops = { +@@ -345,7 +346,8 @@ static struct iwl_lib_ops iwl6050_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static const struct iwl_ops iwl6050_ops = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h +index d67048e..5234a85 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-core.h ++++ b/drivers/net/wireless/iwlwifi/iwl-core.h +@@ -193,8 +193,11 @@ struct iwl_lib_ops { + void (*add_bcast_station)(struct iwl_priv *priv); + /* recover from tx queue stall */ + void (*recover_from_tx_stall)(unsigned long data); +- /* recover from errors showed in statistics */ +- void (*recover_from_statistics)(struct iwl_priv 
*priv, ++ /* check for plcp health */ ++ bool (*check_plcp_health)(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); ++ /* check for ack health */ ++ bool (*check_ack_health)(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); + }; + +@@ -438,7 +441,9 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +-void iwl_recover_from_statistics(struct iwl_priv *priv, ++bool iwl_good_plcp_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); ++bool iwl_good_ack_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); + void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c +index f48d685..506ccf7 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-rx.c ++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c +@@ -622,24 +622,18 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, + #define BA_TIMEOUT_CNT (5) + #define BA_TIMEOUT_MAX (16) + +-#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" +-/* +- * This function checks for plcp error, ACK count ratios, aggregated BA +- * timeout retries. +- * - When the ACK count ratio is 0 and aggregated BA timeout retries is +- * exceeding the BA_TIMEOUT_MAX, it will recover the failure by resetting +- * the firmware. +- * - When the plcp error is exceeding the thresholds, it will reset the radio +- * to improve the throughput. ++/** ++ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. ++ * ++ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding ++ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal ++ * operation state. 
+ */ +-void iwl_recover_from_statistics(struct iwl_priv *priv, +- struct iwl_rx_packet *pkt) ++bool iwl_good_ack_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) + { +- int combined_plcp_delta; +- unsigned int plcp_msec; +- unsigned long plcp_received_jiffies; +- int actual_ack_cnt_delta; +- int expected_ack_cnt_delta; ++ bool rc = true; ++ int actual_ack_cnt_delta, expected_ack_cnt_delta; + int ba_timeout_delta; + + actual_ack_cnt_delta = +@@ -670,13 +664,27 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, + #endif + IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", + ba_timeout_delta); +- if ((actual_ack_cnt_delta == 0) && +- (ba_timeout_delta >= BA_TIMEOUT_MAX)) { +- IWL_DEBUG_RADIO(priv, +- "call iwl_force_reset(IWL_FW_RESET)\n"); +- iwl_force_reset(priv, IWL_FW_RESET); +- } ++ if (!actual_ack_cnt_delta && ++ (ba_timeout_delta >= BA_TIMEOUT_MAX)) ++ rc = false; + } ++ return rc; ++} ++EXPORT_SYMBOL(iwl_good_ack_health); ++ ++/** ++ * iwl_good_plcp_health - checks for plcp error. ++ * ++ * When the plcp error is exceeding the thresholds, reset the radio ++ * to improve the throughput. 
++ */ ++bool iwl_good_plcp_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) ++{ ++ bool rc = true; ++ int combined_plcp_delta; ++ unsigned int plcp_msec; ++ unsigned long plcp_received_jiffies; + + /* + * check for plcp_err and trigger radio reset if it exceeds +@@ -711,7 +719,8 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, + * combined_plcp_delta, + * plcp_msec + */ +- IWL_DEBUG_RADIO(priv, PLCP_MSG, ++ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " ++ "%u, %u, %u, %u, %d, %u mSecs\n", + priv->cfg->plcp_delta_threshold, + le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), + le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), +@@ -719,15 +728,42 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, + le32_to_cpu( + priv->statistics.rx.ofdm_ht.plcp_err), + combined_plcp_delta, plcp_msec); +- /* +- * Reset the RF radio due to the high plcp +- * error rate +- */ +- iwl_force_reset(priv, IWL_RF_RESET); ++ rc = false; ++ } ++ } ++ return rc; ++} ++EXPORT_SYMBOL(iwl_good_plcp_health); ++ ++static void iwl_recover_from_statistics(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) ++{ ++ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ++ return; ++ if (iwl_is_associated(priv)) { ++ if (priv->cfg->ops->lib->check_ack_health) { ++ if (!priv->cfg->ops->lib->check_ack_health( ++ priv, pkt)) { ++ /* ++ * low ack count detected ++ * restart Firmware ++ */ ++ IWL_ERR(priv, "low ack count detected, " ++ "restart firmware\n"); ++ iwl_force_reset(priv, IWL_FW_RESET); ++ } ++ } else if (priv->cfg->ops->lib->check_plcp_health) { ++ if (!priv->cfg->ops->lib->check_plcp_health( ++ priv, pkt)) { ++ /* ++ * high plcp error detected ++ * reset Radio ++ */ ++ iwl_force_reset(priv, IWL_RF_RESET); ++ } + } + } + } +-EXPORT_SYMBOL(iwl_recover_from_statistics); + + void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +@@ -749,8 +785,7 @@ void iwl_rx_statistics(struct iwl_priv *priv, + #ifdef CONFIG_IWLWIFI_DEBUG + iwl_accumulative_statistics(priv, 
(__le32 *)&pkt->u.stats); + #endif +- if (priv->cfg->ops->lib->recover_from_statistics) +- priv->cfg->ops->lib->recover_from_statistics(priv, pkt); ++ iwl_recover_from_statistics(priv, pkt); + + memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); + +-- +1.7.0.1 + diff --git a/iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch b/iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch new file mode 100644 index 000000000..3fbc641a3 --- /dev/null +++ b/iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch @@ -0,0 +1,150 @@ +From 8ac33071b4c991e302be67fd0dae1d9cc4b502e3 Mon Sep 17 00:00:00 2001 +From: Wey-Yi Guy +Date: Tue, 16 Mar 2010 10:46:31 -0700 +Subject: [PATCH] iwlwifi: iwl_good_ack_health() only apply to AGN device + +iwl_good_ack_health() check for expected and actual ack count which only +apply to aggregation mode. Move the function to iwlagn module. + +Reported-by: Chantry Xavier +Signed-off-by: Wey-Yi Guy +Signed-off-by: Reinette Chatre +--- + drivers/net/wireless/iwlwifi/iwl-agn.c | 54 +++++++++++++++++++++++++++++++ + drivers/net/wireless/iwlwifi/iwl-rx.c | 55 -------------------------------- + 2 files changed, 54 insertions(+), 55 deletions(-) + +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c +index dc751cb..b5d410b 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c +@@ -1448,6 +1448,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) + iwl_enable_interrupts(priv); + } + ++/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ ++#define ACK_CNT_RATIO (50) ++#define BA_TIMEOUT_CNT (5) ++#define BA_TIMEOUT_MAX (16) ++ ++/** ++ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. ++ * ++ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding ++ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal ++ * operation state. 
++ */ ++bool iwl_good_ack_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) ++{ ++ bool rc = true; ++ int actual_ack_cnt_delta, expected_ack_cnt_delta; ++ int ba_timeout_delta; ++ ++ actual_ack_cnt_delta = ++ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - ++ le32_to_cpu(priv->statistics.tx.actual_ack_cnt); ++ expected_ack_cnt_delta = ++ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - ++ le32_to_cpu(priv->statistics.tx.expected_ack_cnt); ++ ba_timeout_delta = ++ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - ++ le32_to_cpu(priv->statistics.tx.agg.ba_timeout); ++ if ((priv->_agn.agg_tids_count > 0) && ++ (expected_ack_cnt_delta > 0) && ++ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) ++ < ACK_CNT_RATIO) && ++ (ba_timeout_delta > BA_TIMEOUT_CNT)) { ++ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," ++ " expected_ack_cnt = %d\n", ++ actual_ack_cnt_delta, expected_ack_cnt_delta); ++ ++#ifdef CONFIG_IWLWIFI_DEBUG ++ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", ++ priv->delta_statistics.tx.rx_detected_cnt); ++ IWL_DEBUG_RADIO(priv, ++ "ack_or_ba_timeout_collision delta = %d\n", ++ priv->delta_statistics.tx. 
++ ack_or_ba_timeout_collision); ++#endif ++ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", ++ ba_timeout_delta); ++ if (!actual_ack_cnt_delta && ++ (ba_timeout_delta >= BA_TIMEOUT_MAX)) ++ rc = false; ++ } ++ return rc; ++} ++ + + /****************************************************************************** + * +diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c +index 506ccf7..def5042 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-rx.c ++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c +@@ -617,61 +617,6 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, + + #define REG_RECALIB_PERIOD (60) + +-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ +-#define ACK_CNT_RATIO (50) +-#define BA_TIMEOUT_CNT (5) +-#define BA_TIMEOUT_MAX (16) +- +-/** +- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. +- * +- * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding +- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal +- * operation state. 
+- */ +-bool iwl_good_ack_health(struct iwl_priv *priv, +- struct iwl_rx_packet *pkt) +-{ +- bool rc = true; +- int actual_ack_cnt_delta, expected_ack_cnt_delta; +- int ba_timeout_delta; +- +- actual_ack_cnt_delta = +- le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - +- le32_to_cpu(priv->statistics.tx.actual_ack_cnt); +- expected_ack_cnt_delta = +- le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - +- le32_to_cpu(priv->statistics.tx.expected_ack_cnt); +- ba_timeout_delta = +- le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - +- le32_to_cpu(priv->statistics.tx.agg.ba_timeout); +- if ((priv->_agn.agg_tids_count > 0) && +- (expected_ack_cnt_delta > 0) && +- (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) +- < ACK_CNT_RATIO) && +- (ba_timeout_delta > BA_TIMEOUT_CNT)) { +- IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," +- " expected_ack_cnt = %d\n", +- actual_ack_cnt_delta, expected_ack_cnt_delta); +- +-#ifdef CONFIG_IWLWIFI_DEBUG +- IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", +- priv->delta_statistics.tx.rx_detected_cnt); +- IWL_DEBUG_RADIO(priv, +- "ack_or_ba_timeout_collision delta = %d\n", +- priv->delta_statistics.tx. +- ack_or_ba_timeout_collision); +-#endif +- IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", +- ba_timeout_delta); +- if (!actual_ack_cnt_delta && +- (ba_timeout_delta >= BA_TIMEOUT_MAX)) +- rc = false; +- } +- return rc; +-} +-EXPORT_SYMBOL(iwl_good_ack_health); +- + /** + * iwl_good_plcp_health - checks for plcp error. + * +-- +1.7.0.1 + diff --git a/iwlwifi-manage-QoS-by-mac-stack.patch b/iwlwifi-manage-QoS-by-mac-stack.patch new file mode 100644 index 000000000..bd0765c97 --- /dev/null +++ b/iwlwifi-manage-QoS-by-mac-stack.patch @@ -0,0 +1,361 @@ +From: Stanislaw Gruszka +To: kernel@lists.fedoraproject.org, "John W. Linville" +Subject: [PATCH 2/4 2.6.34.y] iwlwifi: manage QoS by mac stack +Date: Fri, 11 Jun 2010 17:05:12 +0200 + +commit e61146e36b40fd9d346118c40285913236c329f3 upstream. 
+ +We activate/deactivate QoS and setup default queue parameters in iwlwifi +driver. Mac stack do the same, so we do not need repeat that work here. +Stack also will tell when disable QoS, this will fix driver when working +with older APs, that do not have QoS implemented. + +Patch make "force = true" in iwl_active_qos() assuming we always want +to do with QoS what mac stack wish. + +Patch also remove unused qos_cap bits, do not initialize qos_active = 0, +as we have it initialized to zero by kzalloc. + +Signed-off-by: Stanislaw Gruszka +--- + drivers/net/wireless/iwlwifi/iwl-agn.c | 15 --- + drivers/net/wireless/iwlwifi/iwl-core.c | 142 +++------------------------ + drivers/net/wireless/iwlwifi/iwl-core.h | 3 +- + drivers/net/wireless/iwlwifi/iwl-dev.h | 21 ---- + drivers/net/wireless/iwlwifi/iwl3945-base.c | 7 -- + 5 files changed, 17 insertions(+), 171 deletions(-) + +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c +index bdff565..21c3ef0 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c +@@ -2515,7 +2515,6 @@ void iwl_post_associate(struct iwl_priv *priv) + { + struct ieee80211_conf *conf = NULL; + int ret = 0; +- unsigned long flags; + + if (priv->iw_mode == NL80211_IFTYPE_AP) { + IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); +@@ -2600,10 +2599,6 @@ void iwl_post_associate(struct iwl_priv *priv) + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->assoc_station_added = 1; + +- spin_lock_irqsave(&priv->lock, flags); +- iwl_activate_qos(priv, 0); +- spin_unlock_irqrestore(&priv->lock, flags); +- + /* the chain noise calibration will enabled PM upon completion + * If chain noise has already been run, then we need to enable + * power management here */ +@@ -2780,7 +2775,6 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) + void iwl_config_ap(struct iwl_priv *priv) + { + int ret = 0; +- unsigned long flags; + + if 
(test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; +@@ -2832,10 +2826,6 @@ void iwl_config_ap(struct iwl_priv *priv) + /* restore RXON assoc */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); +- iwl_reset_qos(priv); +- spin_lock_irqsave(&priv->lock, flags); +- iwl_activate_qos(priv, 1); +- spin_unlock_irqrestore(&priv->lock, flags); + iwl_add_bcast_station(priv); + } + iwl_send_beacon_cmd(priv); +@@ -3396,11 +3386,6 @@ static int iwl_init_drv(struct iwl_priv *priv) + + iwl_init_scan_params(priv); + +- iwl_reset_qos(priv); +- +- priv->qos_data.qos_active = 0; +- priv->qos_data.qos_cap.val = 0; +- + priv->rates_mask = IWL_RATES_MASK; + /* Set the tx_power_user_lmt to the lowest power level + * this value will get overwritten by channel max power avg +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c +index 049b652..2dd8aaa 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-core.c ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c +@@ -325,17 +325,13 @@ EXPORT_SYMBOL(iwl_hw_nic_init); + /* + * QoS support + */ +-void iwl_activate_qos(struct iwl_priv *priv, u8 force) ++static void iwl_update_qos(struct iwl_priv *priv) + { + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + priv->qos_data.def_qos_parm.qos_flags = 0; + +- if (priv->qos_data.qos_cap.q_AP.queue_request && +- !priv->qos_data.qos_cap.q_AP.txop_request) +- priv->qos_data.def_qos_parm.qos_flags |= +- QOS_PARAM_FLG_TXOP_TYPE_MSK; + if (priv->qos_data.qos_active) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; +@@ -343,118 +339,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force) + if (priv->current_ht_config.is_ht) + priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; + +- if (force || iwl_is_associated(priv)) { +- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", +- priv->qos_data.qos_active, +- priv->qos_data.def_qos_parm.qos_flags); ++ 
IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", ++ priv->qos_data.qos_active, ++ priv->qos_data.def_qos_parm.qos_flags); + +- iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM, +- sizeof(struct iwl_qosparam_cmd), +- &priv->qos_data.def_qos_parm, NULL); +- } ++ iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM, ++ sizeof(struct iwl_qosparam_cmd), ++ &priv->qos_data.def_qos_parm, NULL); + } +-EXPORT_SYMBOL(iwl_activate_qos); +- +-/* +- * AC CWmin CW max AIFSN TXOP Limit TXOP Limit +- * (802.11b) (802.11a/g) +- * AC_BK 15 1023 7 0 0 +- * AC_BE 15 1023 3 0 0 +- * AC_VI 7 15 2 6.016ms 3.008ms +- * AC_VO 3 7 2 3.264ms 1.504ms +- */ +-void iwl_reset_qos(struct iwl_priv *priv) +-{ +- u16 cw_min = 15; +- u16 cw_max = 1023; +- u8 aifs = 2; +- bool is_legacy = false; +- unsigned long flags; +- int i; +- +- spin_lock_irqsave(&priv->lock, flags); +- /* QoS always active in AP and ADHOC mode +- * In STA mode wait for association +- */ +- if (priv->iw_mode == NL80211_IFTYPE_ADHOC || +- priv->iw_mode == NL80211_IFTYPE_AP) +- priv->qos_data.qos_active = 1; +- else +- priv->qos_data.qos_active = 0; +- +- /* check for legacy mode */ +- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC && +- (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) || +- (priv->iw_mode == NL80211_IFTYPE_STATION && +- (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) { +- cw_min = 31; +- is_legacy = 1; +- } +- +- if (priv->qos_data.qos_active) +- aifs = 3; +- +- /* AC_BE */ +- priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); +- priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); +- priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; +- priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; +- priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; +- +- if (priv->qos_data.qos_active) { +- /* AC_BK */ +- i = 1; +- priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); +- priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); +- priv->qos_data.def_qos_parm.ac[i].aifsn = 7; 
+- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; +- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; +- +- /* AC_VI */ +- i = 2; +- priv->qos_data.def_qos_parm.ac[i].cw_min = +- cpu_to_le16((cw_min + 1) / 2 - 1); +- priv->qos_data.def_qos_parm.ac[i].cw_max = +- cpu_to_le16(cw_min); +- priv->qos_data.def_qos_parm.ac[i].aifsn = 2; +- if (is_legacy) +- priv->qos_data.def_qos_parm.ac[i].edca_txop = +- cpu_to_le16(6016); +- else +- priv->qos_data.def_qos_parm.ac[i].edca_txop = +- cpu_to_le16(3008); +- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; +- +- /* AC_VO */ +- i = 3; +- priv->qos_data.def_qos_parm.ac[i].cw_min = +- cpu_to_le16((cw_min + 1) / 4 - 1); +- priv->qos_data.def_qos_parm.ac[i].cw_max = +- cpu_to_le16((cw_min + 1) / 2 - 1); +- priv->qos_data.def_qos_parm.ac[i].aifsn = 2; +- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; +- if (is_legacy) +- priv->qos_data.def_qos_parm.ac[i].edca_txop = +- cpu_to_le16(3264); +- else +- priv->qos_data.def_qos_parm.ac[i].edca_txop = +- cpu_to_le16(1504); +- } else { +- for (i = 1; i < 4; i++) { +- priv->qos_data.def_qos_parm.ac[i].cw_min = +- cpu_to_le16(cw_min); +- priv->qos_data.def_qos_parm.ac[i].cw_max = +- cpu_to_le16(cw_max); +- priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; +- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; +- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; +- } +- } +- IWL_DEBUG_QOS(priv, "set QoS to default \n"); +- +- spin_unlock_irqrestore(&priv->lock, flags); +-} +-EXPORT_SYMBOL(iwl_reset_qos); + + #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ + #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ +@@ -2306,12 +2198,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + cpu_to_le16((params->txop * 32)); + + priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; +- priv->qos_data.qos_active = 1; +- +- if (priv->iw_mode == NL80211_IFTYPE_AP) +- iwl_activate_qos(priv, 1); +- else if (priv->assoc_id && iwl_is_associated(priv)) +- iwl_activate_qos(priv, 0); + + spin_unlock_irqrestore(&priv->lock, 
flags); + +@@ -2587,11 +2473,8 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) + IWL_DEBUG_MAC80211(priv, "leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + +- iwl_reset_qos(priv); +- + priv->cfg->ops->lib->post_associate(priv); + +- + return 0; + } + EXPORT_SYMBOL(iwl_mac_beacon_update); +@@ -2833,6 +2716,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) + iwl_set_tx_power(priv, conf->power_level, false); + } + ++ if (changed & IEEE80211_CONF_CHANGE_QOS) { ++ bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS); ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->qos_data.qos_active = qos_active; ++ iwl_update_qos(priv); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ } ++ + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + goto out; +@@ -2867,8 +2759,6 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw) + memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); + spin_unlock_irqrestore(&priv->lock, flags); + +- iwl_reset_qos(priv); +- + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = 0; + priv->assoc_capability = 0; +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h +index 36940a9..70af968 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-core.h ++++ b/drivers/net/wireless/iwlwifi/iwl-core.h +@@ -304,8 +304,7 @@ struct iwl_cfg { + struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, + struct ieee80211_ops *hw_ops); + void iwl_hw_detect(struct iwl_priv *priv); +-void iwl_reset_qos(struct iwl_priv *priv); +-void iwl_activate_qos(struct iwl_priv *priv, u8 force); ++void iwl_activate_qos(struct iwl_priv *priv); + int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params); + void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt); +diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h +index ef1720a..cc12e89 100644 +--- 
a/drivers/net/wireless/iwlwifi/iwl-dev.h ++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h +@@ -519,30 +519,9 @@ struct iwl_ht_config { + u8 non_GF_STA_present; + }; + +-union iwl_qos_capabity { +- struct { +- u8 edca_count:4; /* bit 0-3 */ +- u8 q_ack:1; /* bit 4 */ +- u8 queue_request:1; /* bit 5 */ +- u8 txop_request:1; /* bit 6 */ +- u8 reserved:1; /* bit 7 */ +- } q_AP; +- struct { +- u8 acvo_APSD:1; /* bit 0 */ +- u8 acvi_APSD:1; /* bit 1 */ +- u8 ac_bk_APSD:1; /* bit 2 */ +- u8 ac_be_APSD:1; /* bit 3 */ +- u8 q_ack:1; /* bit 4 */ +- u8 max_len:2; /* bit 5-6 */ +- u8 more_data_ack:1; /* bit 7 */ +- } q_STA; +- u8 val; +-}; +- + /* QoS structures */ + struct iwl_qos_info { + int qos_active; +- union iwl_qos_capabity qos_cap; + struct iwl_qosparam_cmd def_qos_parm; + }; + +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c +index b74a56c..c054527 100644 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c +@@ -3152,8 +3152,6 @@ void iwl3945_post_associate(struct iwl_priv *priv) + break; + } + +- iwl_activate_qos(priv, 0); +- + /* we have just associated, don't start scan too early */ + priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; + } +@@ -3861,11 +3859,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv) + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + +- iwl_reset_qos(priv); +- +- priv->qos_data.qos_active = 0; +- priv->qos_data.qos_cap.val = 0; +- + priv->rates_mask = IWL_RATES_MASK; + priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; + +-- +1.6.2.5 + +_______________________________________________ +kernel mailing list +kernel@lists.fedoraproject.org +https://admin.fedoraproject.org/mailman/listinfo/kernel + diff --git a/iwlwifi-move-plcp-check-to-separated-function.patch b/iwlwifi-move-plcp-check-to-separated-function.patch new file mode 100644 index 000000000..d8052062b --- /dev/null +++ 
b/iwlwifi-move-plcp-check-to-separated-function.patch @@ -0,0 +1,208 @@ +From b3786de4e1033b00d522a5c457a3ea9f8376d0d0 Mon Sep 17 00:00:00 2001 +From: Wey-Yi Guy +Date: Thu, 4 Mar 2010 13:38:58 -0800 +Subject: [PATCH] iwlwifi: move plcp check to separated function + +Move the plcp error checking into stand alone function and pointed by ops +to accommodate devices not needing this recovery. + +Signed-off-by: Trieu 'Andrew' Nguyen +Signed-off-by: Wey-Yi Guy +Signed-off-by: Reinette Chatre +--- + drivers/net/wireless/iwlwifi/iwl-1000.c | 1 + + drivers/net/wireless/iwlwifi/iwl-4965.c | 1 + + drivers/net/wireless/iwlwifi/iwl-5000.c | 2 + + drivers/net/wireless/iwlwifi/iwl-6000.c | 2 + + drivers/net/wireless/iwlwifi/iwl-core.h | 5 +++ + drivers/net/wireless/iwlwifi/iwl-rx.c | 58 +++++++++++++++++++------------ + 6 files changed, 47 insertions(+), 22 deletions(-) + +diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c +index 89dc401..2597574 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-1000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-1000.c +@@ -212,6 +212,7 @@ static struct iwl_lib_ops iwl1000_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static const struct iwl_ops iwl1000_ops = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c +index aa49a6e..6dd4328 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-4965.c ++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c +@@ -2217,6 +2217,7 @@ static struct iwl_lib_ops iwl4965_lib = { + .set_ct_kill = iwl4965_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static const struct iwl_ops iwl4965_ops = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c +index d05fad4..0c2469c 100644 +--- 
a/drivers/net/wireless/iwlwifi/iwl-5000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c +@@ -1501,6 +1501,7 @@ struct iwl_lib_ops iwl5000_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static struct iwl_lib_ops iwl5150_lib = { +@@ -1556,6 +1557,7 @@ static struct iwl_lib_ops iwl5150_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static const struct iwl_ops iwl5000_ops = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c +index 0c965cd..189a8ce 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-6000.c ++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c +@@ -278,6 +278,7 @@ static struct iwl_lib_ops iwl6000_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static const struct iwl_ops iwl6000_ops = { +@@ -344,6 +345,7 @@ static struct iwl_lib_ops iwl6050_lib = { + }, + .add_bcast_station = iwl_add_bcast_station, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static const struct iwl_ops iwl6050_ops = { +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h +index 9076576..d67048e 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-core.h ++++ b/drivers/net/wireless/iwlwifi/iwl-core.h +@@ -193,6 +193,9 @@ struct iwl_lib_ops { + void (*add_bcast_station)(struct iwl_priv *priv); + /* recover from tx queue stall */ + void (*recover_from_tx_stall)(unsigned long data); ++ /* recover from errors showed in statistics */ ++ void (*recover_from_statistics)(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); + }; + + struct iwl_led_ops { +@@ -435,6 
+438,8 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); ++void iwl_recover_from_statistics(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); + void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + void iwl_reply_statistics(struct iwl_priv *priv, +diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c +index e5eb339..fabc52f 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-rx.c ++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c +@@ -618,28 +618,18 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, + #define REG_RECALIB_PERIOD (60) + + #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" +-void iwl_rx_statistics(struct iwl_priv *priv, +- struct iwl_rx_mem_buffer *rxb) ++/* ++ * This function checks for plcp error. ++ * - When the plcp error is exceeding the thresholds, it will reset the radio ++ * to improve the throughput. ++ */ ++void iwl_recover_from_statistics(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) + { +- int change; +- struct iwl_rx_packet *pkt = rxb_addr(rxb); + int combined_plcp_delta; + unsigned int plcp_msec; + unsigned long plcp_received_jiffies; + +- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", +- (int)sizeof(priv->statistics), +- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); +- +- change = ((priv->statistics.general.temperature != +- pkt->u.stats.general.temperature) || +- ((priv->statistics.flag & +- STATISTICS_REPLY_FLG_HT40_MODE_MSK) != +- (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); +- +-#ifdef CONFIG_IWLWIFI_DEBUG +- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); +-#endif + /* + * check for plcp_err and trigger radio reset if it exceeds + * the plcp error threshold plcp_delta. 
+@@ -660,11 +650,11 @@ void iwl_rx_statistics(struct iwl_priv *priv, + le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); + + if ((combined_plcp_delta > 0) && +- ((combined_plcp_delta * 100) / plcp_msec) > ++ ((combined_plcp_delta * 100) / plcp_msec) > + priv->cfg->plcp_delta_threshold) { + /* +- * if plcp_err exceed the threshold, the following +- * data is printed in csv format: ++ * if plcp_err exceed the threshold, ++ * the following data is printed in csv format: + * Text: plcp_err exceeded %d, + * Received ofdm.plcp_err, + * Current ofdm.plcp_err, +@@ -679,9 +669,8 @@ void iwl_rx_statistics(struct iwl_priv *priv, + le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), + le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), + le32_to_cpu( +- priv->statistics.rx.ofdm_ht.plcp_err), ++ priv->statistics.rx.ofdm_ht.plcp_err), + combined_plcp_delta, plcp_msec); +- + /* + * Reset the RF radio due to the high plcp + * error rate +@@ -689,6 +678,31 @@ void iwl_rx_statistics(struct iwl_priv *priv, + iwl_force_reset(priv, IWL_RF_RESET); + } + } ++} ++EXPORT_SYMBOL(iwl_recover_from_statistics); ++ ++void iwl_rx_statistics(struct iwl_priv *priv, ++ struct iwl_rx_mem_buffer *rxb) ++{ ++ int change; ++ struct iwl_rx_packet *pkt = rxb_addr(rxb); ++ ++ ++ IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", ++ (int)sizeof(priv->statistics), ++ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); ++ ++ change = ((priv->statistics.general.temperature != ++ pkt->u.stats.general.temperature) || ++ ((priv->statistics.flag & ++ STATISTICS_REPLY_FLG_HT40_MODE_MSK) != ++ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); ++ ++#ifdef CONFIG_IWLWIFI_DEBUG ++ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); ++#endif ++ if (priv->cfg->ops->lib->recover_from_statistics) ++ priv->cfg->ops->lib->recover_from_statistics(priv, pkt); + + memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); + +-- +1.7.0.1 + diff --git 
a/iwlwifi-recover_from_tx_stall.patch b/iwlwifi-recover_from_tx_stall.patch new file mode 100644 index 000000000..a4df5b797 --- /dev/null +++ b/iwlwifi-recover_from_tx_stall.patch @@ -0,0 +1,12 @@ +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c +index 0728054..ae43a43 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c ++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c +@@ -2792,6 +2792,7 @@ static struct iwl_lib_ops iwl3945_lib = { + .isr = iwl_isr_legacy, + .config_ap = iwl3945_config_ap, + .add_bcast_station = iwl3945_add_bcast_station, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { diff --git a/kbuild-fix-modpost-segfault.patch b/kbuild-fix-modpost-segfault.patch new file mode 100644 index 000000000..73d2d78c9 --- /dev/null +++ b/kbuild-fix-modpost-segfault.patch @@ -0,0 +1,43 @@ +From: Krzysztof Halasa +Date: Thu, 10 Jun 2010 23:08:20 +0000 (+0200) +Subject: kbuild: Fix modpost segfault +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=1c938663d58b5b2965976a6f54cc51b5d6f691aa + +kbuild: Fix modpost segfault + +Alan writes: + +> program: /home/alan/GitTrees/linux-2.6-mid-ref/scripts/mod/modpost -o +> Module.symvers -S vmlinux.o +> +> Program received signal SIGSEGV, Segmentation fault. + +It just hit me. +It's the offset calculation in reloc_location() which overflows: + return (void *)elf->hdr + sechdrs[section].sh_offset + + (r->r_offset - sechdrs[section].sh_addr); + +E.g. for the first rodata r entry: +r->r_offset < sechdrs[section].sh_addr +and the expression in the parenthesis produces 0xFFFFFFE0 or something +equally wise. 
+ +Reported-by: Alan +Signed-off-by: Krzysztof Hałasa +Tested-by: Alan +Signed-off-by: Michal Marek +--- + +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c +index 3318692..f877900 100644 +--- a/scripts/mod/modpost.c ++++ b/scripts/mod/modpost.c +@@ -1342,7 +1342,7 @@ static unsigned int *reloc_location(struct elf_info *elf, + int section = sechdr->sh_info; + + return (void *)elf->hdr + sechdrs[section].sh_offset + +- (r->r_offset - sechdrs[section].sh_addr); ++ r->r_offset - sechdrs[section].sh_addr; + } + + static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r) diff --git a/kernel.spec b/kernel.spec new file mode 100644 index 000000000..8b2804ce8 --- /dev/null +++ b/kernel.spec @@ -0,0 +1,2338 @@ +# We have to override the new %%install behavior because, well... the kernel is special. +%global __spec_install_pre %{___build_pre} + +Summary: The Linux kernel + +# For a stable, released kernel, released_kernel should be 1. For rawhide +# and/or a kernel built from an rc or git snapshot, released_kernel should +# be 0. +%global released_kernel 1 + +# Save original buildid for later if it's defined +%if 0%{?buildid:1} +%global orig_buildid %{buildid} +%undefine buildid +%endif + +################################################################### +# Polite request for people who spin their own kernel rpms: +# please modify the "buildid" define in a way that identifies +# that the kernel isn't the stock distribution kernel, for example, +# by setting the define to ".local" or ".bz123456". This will be +# appended to the full kernel version. +# +# (Uncomment the '#' and both spaces below to set the buildid.) +# +# % define buildid .local +################################################################### + +# The buildid can also be specified on the rpmbuild command line +# by adding --define="buildid .whatever". If both the specfile and +# the environment define a buildid they will be concatenated together.
+%if 0%{?orig_buildid:1} +%if 0%{?buildid:1} +%global srpm_buildid %{buildid} +%define buildid %{srpm_buildid}%{orig_buildid} +%else +%define buildid %{orig_buildid} +%endif +%endif + +# fedora_build defines which build revision of this kernel version we're +# building. Rather than incrementing forever, as with the prior versioning +# setup, we set fedora_cvs_origin to the current cvs revision s/1.// of the +# kernel spec when the kernel is rebased, so fedora_build automatically +# works out to the offset from the rebase, so it doesn't get too ginormous. +# +# If you're building on a branch, the RCS revision will be something like +# 1.1205.1.1. In this case we drop the initial 1, subtract fedora_cvs_origin +# from the second number, and then append the rest of the RCS string as is. +# Don't stare at the awk too long, you'll go blind. +%define fedora_cvs_origin 2084 +%define fedora_cvs_revision() %2 +%global fedora_build %(echo %{fedora_cvs_origin}.%{fedora_cvs_revision $Revision: 1.2114 $} | awk -F . '{ OFS = "."; ORS = ""; print $3 - $1 ; i = 4 ; OFS = ""; while (i <= NF) { print ".", $i ; i++} }') + +# base_sublevel is the kernel version we're starting with and patching +# on top of -- for example, 2.6.22-rc7-git1 starts with a 2.6.21 base, +# which yields a base_sublevel of 21. +%define base_sublevel 34 + +## If this is a released kernel ## +%if 0%{?released_kernel} + +# Do we have a -stable update to apply? +%define stable_update 1 +# Is it a -stable RC? 
+%define stable_rc 0 +# Set rpm version accordingly +%if 0%{?stable_update} +%define stablerev .%{stable_update} +%define stable_base %{stable_update} +%if 0%{?stable_rc} +# stable RCs are incremental patches, so we need the previous stable patch +%define stable_base %(echo $((%{stable_update} - 1))) +%endif +%endif +%define rpmversion 2.6.%{base_sublevel}%{?stablerev} + +## The not-released-kernel case ## +%else +# The next upstream release sublevel (base_sublevel+1) +%define upstream_sublevel %(echo $((%{base_sublevel} + 1))) +# The rc snapshot level +%define rcrev 0 +# The git snapshot level +%define gitrev 0 +# Set rpm version accordingly +%define rpmversion 2.6.%{upstream_sublevel} +%endif +# Nb: The above rcrev and gitrev values automagically define Patch00 and Patch01 below. + +# What parts do we want to build? We must build at least one kernel. +# These are the kernels that are built IF the architecture allows it. +# All should default to 1 (enabled) and be flipped to 0 (disabled) +# by later arch-specific checks. + +# The following build options are enabled by default. +# Use either --without in your rpmbuild command or force values +# to 0 in here to disable them. 
+# +# standard kernel +%define with_up %{?_without_up: 0} %{?!_without_up: 1} +# kernel-smp (only valid for ppc 32-bit) +%define with_smp %{?_without_smp: 0} %{?!_without_smp: 1} +# kernel-debug +%define with_debug %{?_without_debug: 0} %{?!_without_debug: 1} +# kernel-doc +%define with_doc %{?_without_doc: 0} %{?!_without_doc: 1} +# kernel-headers +%define with_headers %{?_without_headers: 0} %{?!_without_headers: 1} +# kernel-firmware +%define with_firmware %{?_with_firmware: 1} %{?!_with_firmware: 0} +# tools/perf +%define with_perftool %{?_without_perftool: 0} %{?!_without_perftool: 1} +# perf noarch subpkg +%define with_perf %{?_without_perf: 0} %{?!_without_perf: 1} +# kernel-debuginfo +%define with_debuginfo %{?_without_debuginfo: 0} %{?!_without_debuginfo: 1} +# kernel-bootwrapper (for creating zImages from kernel + initrd) +%define with_bootwrapper %{?_without_bootwrapper: 0} %{?!_without_bootwrapper: 1} +# Want to build a the vsdo directories installed +%define with_vdso_install %{?_without_vdso_install: 0} %{?!_without_vdso_install: 1} + +# Build the kernel-doc package, but don't fail the build if it botches. +# Here "true" means "continue" and "false" means "fail the build". 
+%if 0%{?released_kernel} +%define doc_build_fail true +%else +%define doc_build_fail true +%endif + +%define rawhide_skip_docs 0 +%if 0%{?rawhide_skip_docs} +%define with_doc 0 +%endif + +# Additional options for user-friendly one-off kernel building: +# +# Only build the base kernel (--with baseonly): +%define with_baseonly %{?_with_baseonly: 1} %{?!_with_baseonly: 0} +# Only build the smp kernel (--with smponly): +%define with_smponly %{?_with_smponly: 1} %{?!_with_smponly: 0} +# Only build the debug kernel (--with dbgonly): +%define with_dbgonly %{?_with_dbgonly: 1} %{?!_with_dbgonly: 0} + +# should we do C=1 builds with sparse +%define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0} + +# Set debugbuildsenabled to 1 for production (build separate debug kernels) +# and 0 for rawhide (all kernels are debug kernels). +# See also 'make debug' and 'make release'. +%define debugbuildsenabled 1 + +# Want to build a vanilla kernel build without any non-upstream patches? +# (well, almost none, we need nonintconfig for build purposes). Default to 0 (off). 
+%define with_vanilla %{?_with_vanilla: 1} %{?!_with_vanilla: 0} + +# pkg_release is what we'll fill in for the rpm Release: field +%if 0%{?released_kernel} + +%if 0%{?stable_rc} +%define stable_rctag .rc%{stable_rc} +%endif +%define pkg_release %{fedora_build}%{?stable_rctag}%{?buildid}%{?dist} + +%else + +# non-released_kernel +%if 0%{?rcrev} +%define rctag .rc%rcrev +%else +%define rctag .rc0 +%endif +%if 0%{?gitrev} +%define gittag .git%gitrev +%else +%define gittag .git0 +%endif +%define pkg_release 0.%{fedora_build}%{?rctag}%{?gittag}%{?buildid}%{?dist} + +%endif + +# The kernel tarball/base version +%define kversion 2.6.%{base_sublevel} + +%define make_target bzImage + +%define KVERREL %{version}-%{release}.%{_target_cpu} +%define hdrarch %_target_cpu +%define asmarch %_target_cpu + +%if 0%{!?nopatches:1} +%define nopatches 0 +%endif + +%if %{with_vanilla} +%define nopatches 1 +%endif + +%if %{nopatches} +%define with_bootwrapper 0 +%define variant -vanilla +%else +%define variant_fedora -fedora +%endif + +%define using_upstream_branch 0 +%if 0%{?upstream_branch:1} +%define stable_update 0 +%define using_upstream_branch 1 +%define variant -%{upstream_branch}%{?variant_fedora} +%define pkg_release 0.%{fedora_build}%{upstream_branch_tag}%{?buildid}%{?dist} +%endif + +%if !%{debugbuildsenabled} +%define with_debug 0 +%endif + +%if !%{with_debuginfo} +%define _enable_debug_packages 0 +%endif +%define debuginfodir /usr/lib/debug + +# kernel-PAE is only built on i686. 
+%ifarch i686 +%define with_pae 1 +%else +%define with_pae 0 +%endif + +# if requested, only build base kernel +%if %{with_baseonly} +%define with_smp 0 +%define with_debug 0 +%endif + +# if requested, only build smp kernel +%if %{with_smponly} +%define with_up 0 +%define with_debug 0 +%endif + +# if requested, only build debug kernel +%if %{with_dbgonly} +%if %{debugbuildsenabled} +%define with_up 0 +%endif +%define with_smp 0 +%define with_pae 0 +%define with_perftool 0 +%endif + +%define all_x86 i386 i686 + +%if %{with_vdso_install} +# These arches install vdso/ directories. +%define vdso_arches %{all_x86} x86_64 ppc ppc64 +%endif + +# Overrides for generic default options + +# only ppc and alphav56 need separate smp kernels +%ifnarch ppc alphaev56 +%define with_smp 0 +%endif + +# don't do debug builds on anything but i686 and x86_64 +%ifnarch i686 x86_64 +%define with_debug 0 +%endif + +# only package docs noarch +%ifnarch noarch +%define with_doc 0 +%define with_perf 0 +%endif + +# don't build noarch kernels or headers (duh) +%ifarch noarch +%define with_up 0 +%define with_headers 0 +%define all_arch_configs kernel-%{version}-*.config +%define with_firmware %{?_with_firmware: 1} %{?!_with_firmware: 0} +%endif + +# bootwrapper is only on ppc +%ifnarch ppc ppc64 +%define with_bootwrapper 0 +%endif + +# sparse blows up on ppc64 alpha and sparc64 +%ifarch ppc64 ppc alpha sparc64 +%define with_sparse 0 +%endif + +# Per-arch tweaks + +%ifarch %{all_x86} +%define asmarch x86 +%define hdrarch i386 +%define all_arch_configs kernel-%{version}-i?86*.config +%define image_install_path boot +%define kernel_image arch/x86/boot/bzImage +%endif + +%ifarch x86_64 +%define asmarch x86 +%define all_arch_configs kernel-%{version}-x86_64*.config +%define image_install_path boot +%define kernel_image arch/x86/boot/bzImage +%endif + +%ifarch ppc64 +%define asmarch powerpc +%define hdrarch powerpc +%define all_arch_configs kernel-%{version}-ppc64*.config +%define image_install_path 
boot +%define make_target vmlinux +%define kernel_image vmlinux +%define kernel_image_elf 1 +%endif + +%ifarch s390x +%define asmarch s390 +%define hdrarch s390 +%define all_arch_configs kernel-%{version}-s390x.config +%define image_install_path boot +%define make_target image +%define kernel_image arch/s390/boot/image +%endif + +%ifarch sparc64 +%define asmarch sparc +%define all_arch_configs kernel-%{version}-sparc64*.config +%define make_target image +%define kernel_image arch/sparc/boot/image +%define image_install_path boot +%define with_perftool 0 +%endif + +%ifarch ppc +%define asmarch powerpc +%define hdrarch powerpc +%define all_arch_configs kernel-%{version}-ppc{-,.}*config +%define image_install_path boot +%define make_target vmlinux +%define kernel_image vmlinux +%define kernel_image_elf 1 +%endif + +%ifarch ia64 +%define all_arch_configs kernel-%{version}-ia64*.config +%define image_install_path boot/efi/EFI/redhat +%define make_target compressed +%define kernel_image vmlinux.gz +%endif + +%ifarch alpha alphaev56 +%define all_arch_configs kernel-%{version}-alpha*.config +%define image_install_path boot +%define make_target vmlinux +%define kernel_image vmlinux +%endif + +%ifarch %{arm} +%define all_arch_configs kernel-%{version}-arm*.config +%define image_install_path boot +%define hdrarch arm +%define make_target vmlinux +%define kernel_image vmlinux +%endif + +%if %{nopatches} +# XXX temporary until last vdso patches are upstream +%define vdso_arches ppc ppc64 +%endif + +%if %{nopatches}%{using_upstream_branch} +# Ignore unknown options in our config-* files. +# Some options go with patches we're not applying. +%define oldconfig_target loose_nonint_oldconfig +%else +%define oldconfig_target nonint_oldconfig +%endif + +# To temporarily exclude an architecture from being built, add it to +# %nobuildarches. 
Do _NOT_ use the ExclusiveArch: line, because if we +# don't build kernel-headers then the new build system will no longer let +# us use the previous build of that package -- it'll just be completely AWOL. +# Which is a BadThing(tm). + +# We only build kernel-headers on the following... +%define nobuildarches i386 s390 sparc %{arm} + +%ifarch %nobuildarches +%define with_up 0 +%define with_smp 0 +%define with_pae 0 +%define with_debuginfo 0 +%define with_perftool 0 +%define _enable_debug_packages 0 +%endif + +%define with_pae_debug 0 +%if %{with_pae} +%define with_pae_debug %{with_debug} +%endif + +# +# Three sets of minimum package version requirements in the form of Conflicts: +# to versions below the minimum +# + +# +# First the general kernel 2.6 required versions as per +# Documentation/Changes +# +%define kernel_dot_org_conflicts ppp < 2.4.3-3, isdn4k-utils < 3.2-32, nfs-utils < 1.0.7-12, e2fsprogs < 1.37-4, util-linux < 2.12, jfsutils < 1.1.7-2, reiserfs-utils < 3.6.19-2, xfsprogs < 2.6.13-4, procps < 3.2.5-6.3, oprofile < 0.9.1-2 + +# +# Then a series of requirements that are distribution specific, either +# because we add patches for something, or the older versions have +# problems with the newer kernel or lack certain things that make +# integration in the distro harder than needed. +# +%define package_conflicts initscripts < 7.23, udev < 063-6, iptables < 1.3.2-1, ipw2200-firmware < 2.4, iwl4965-firmware < 228.57.2, selinux-policy-targeted < 1.25.3-14, squashfs-tools < 4.0, wireless-tools < 29-3 + +# We moved the drm include files into kernel-headers, make sure there's +# a recent enough libdrm-devel on the system that doesn't have those. +%define kernel_headers_conflicts libdrm-devel < 2.4.0-0.15 + +# +# Packages that need to be installed before the kernel is, because the %post +# scripts use them. 
+# +%define kernel_prereq fileutils, module-init-tools, initscripts >= 8.11.1-1, grubby >= 7.0.10-1 +%define initrd_prereq dracut >= 001-7 + +# +# This macro does requires, provides, conflicts, obsoletes for a kernel package. +# %%kernel_reqprovconf +# It uses any kernel__conflicts and kernel__obsoletes +# macros defined above. +# +%define kernel_reqprovconf \ +Provides: kernel = %{rpmversion}-%{pkg_release}\ +Provides: kernel-%{_target_cpu} = %{rpmversion}-%{pkg_release}%{?1:.%{1}}\ +Provides: kernel-drm = 4.3.0\ +Provides: kernel-drm-nouveau = 16\ +Provides: kernel-modeset = 1\ +Provides: kernel-uname-r = %{KVERREL}%{?1:.%{1}}\ +Requires(pre): %{kernel_prereq}\ +Requires(pre): %{initrd_prereq}\ +%if %{with_firmware}\ +Requires(pre): kernel-firmware >= %{rpmversion}-%{pkg_release}\ +%else\ +Requires(pre): linux-firmware\ +%if %{with_perftool}\ +Requires(pre): elfutils-libs\ +%endif\ +%endif\ +Requires(post): /sbin/new-kernel-pkg\ +Requires(preun): /sbin/new-kernel-pkg\ +Conflicts: %{kernel_dot_org_conflicts}\ +Conflicts: %{package_conflicts}\ +%{expand:%%{?kernel%{?1:_%{1}}_conflicts:Conflicts: %%{kernel%{?1:_%{1}}_conflicts}}}\ +%{expand:%%{?kernel%{?1:_%{1}}_obsoletes:Obsoletes: %%{kernel%{?1:_%{1}}_obsoletes}}}\ +%{expand:%%{?kernel%{?1:_%{1}}_provides:Provides: %%{kernel%{?1:_%{1}}_provides}}}\ +# We can't let RPM do the dependencies automatic because it'll then pick up\ +# a correct but undesirable perl dependency from the module headers which\ +# isn't required for the kernel proper to function\ +AutoReq: no\ +AutoProv: yes\ +%{nil} + +Name: kernel%{?variant} +Group: System Environment/Kernel +License: GPLv2 +URL: http://www.kernel.org/ +Version: %{rpmversion} +Release: %{pkg_release} +# DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. 
+# SET %%nobuildarches (ABOVE) INSTEAD +ExclusiveArch: noarch %{all_x86} x86_64 ppc ppc64 ia64 sparc sparc64 s390 s390x alpha alphaev56 %{arm} +ExclusiveOS: Linux + +%kernel_reqprovconf +%ifarch x86_64 sparc64 +Obsoletes: kernel-smp +%endif + + +# +# List the packages used during the kernel build +# +BuildRequires: module-init-tools, patch >= 2.5.4, bash >= 2.03, sh-utils, tar +BuildRequires: bzip2, findutils, gzip, m4, perl, make >= 3.78, diffutils, gawk +BuildRequires: gcc >= 3.4.2, binutils >= 2.12, redhat-rpm-config +BuildRequires: net-tools +BuildRequires: xmlto, asciidoc +%if %{with_sparse} +BuildRequires: sparse >= 0.4.1 +%endif +%if %{with_perftool} +BuildRequires: elfutils-devel zlib-devel binutils-devel +%endif +BuildConflicts: rhbuildsys(DiskFree) < 500Mb + +%define fancy_debuginfo 0 +%if %{with_debuginfo} +%if 0%{?fedora} >= 8 || 0%{?rhel} >= 6 +%define fancy_debuginfo 1 +%endif +%endif + +%if %{fancy_debuginfo} +# Fancy new debuginfo generation introduced in Fedora 8. +BuildRequires: rpm-build >= 4.4.2.1-4 +%define debuginfo_args --strict-build-id +%endif + +Source0: ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-%{kversion}.tar.bz2 + +Source11: genkey +Source14: find-provides +Source15: merge.pl + +Source20: Makefile.config +Source21: config-debug +Source22: config-nodebug +Source23: config-generic +Source24: config-rhel-generic + +Source30: config-x86-generic +Source31: config-i686-PAE + +Source40: config-x86_64-generic + +Source50: config-powerpc-generic +Source51: config-powerpc32-generic +Source52: config-powerpc32-smp +Source53: config-powerpc64 + +Source60: config-ia64-generic + +Source70: config-s390x + +Source90: config-sparc64-generic + +Source100: config-arm + +Source200: perf + +# Here should be only the patches up to the upstream canonical Linus tree. 
+ +# For a stable release kernel +%if 0%{?stable_update} +%if 0%{?stable_base} +%define stable_patch_00 patch-2.6.%{base_sublevel}.%{stable_base}.bz2 +Patch00: %{stable_patch_00} +%endif +%if 0%{?stable_rc} +%define stable_patch_01 patch-2.6.%{base_sublevel}.%{stable_update}-rc%{stable_rc}.bz2 +Patch01: %{stable_patch_01} +%endif + +# non-released_kernel case +# These are automagically defined by the rcrev and gitrev values set up +# near the top of this spec file. +%else +%if 0%{?rcrev} +Patch00: patch-2.6.%{upstream_sublevel}-rc%{rcrev}.bz2 +%if 0%{?gitrev} +Patch01: patch-2.6.%{upstream_sublevel}-rc%{rcrev}-git%{gitrev}.bz2 +%endif +%else +# pre-{base_sublevel+1}-rc1 case +%if 0%{?gitrev} +Patch00: patch-2.6.%{base_sublevel}-git%{gitrev}.bz2 +%endif +%endif +%endif + +%if %{using_upstream_branch} +### BRANCH PATCH ### +%endif + +Patch02: git-linus.diff + +# we always need nonintconfig, even for -vanilla kernels +Patch03: linux-2.6-build-nonintconfig.patch + +# we also need compile fixes for -vanilla +Patch04: linux-2.6-compile-fixes.patch + +# build tweak for build ID magic, even for -vanilla +Patch05: linux-2.6-makefile-after_link.patch + +%if !%{nopatches} + +# revert upstream patches we get via other methods +Patch09: linux-2.6-upstream-reverts.patch +# Git trees. 
+Patch11: git-bluetooth.patch +Patch12: git-cpufreq.patch + +# Standalone patches +Patch20: linux-2.6-hotfixes.patch + +Patch21: linux-2.6-tracehook.patch +Patch22: linux-2.6-utrace.patch +Patch23: linux-2.6-utrace-ptrace.patch + +Patch50: linux-2.6-x86-cfi_sections.patch + +Patch144: linux-2.6-vio-modalias.patch + +Patch150: linux-2.6.29-sparc-IOC_TYPECHECK.patch + +Patch160: linux-2.6-execshield.patch + +Patch200: linux-2.6-debug-sizeof-structs.patch +Patch201: linux-2.6-debug-nmi-timeout.patch +Patch202: linux-2.6-debug-taint-vm.patch +Patch203: linux-2.6-debug-vm-would-have-oomkilled.patch +Patch204: linux-2.6-debug-always-inline-kzalloc.patch + +Patch300: linux-2.6-driver-level-usb-autosuspend.diff +Patch303: linux-2.6-enable-btusb-autosuspend.patch +Patch304: linux-2.6-usb-uvc-autosuspend.diff +Patch305: linux-2.6-fix-btusb-autosuspend.patch + +Patch310: linux-2.6-usb-wwan-update.patch + +Patch380: linux-2.6-defaults-pci_no_msi.patch +# enable ASPM +Patch383: linux-2.6-defaults-aspm.patch +Patch384: pci-acpi-disable-aspm-if-no-osc.patch +Patch385: pci-aspm-dont-enable-too-early.patch + +# 2.6.34 bugfixes +Patch386: pci-pm-do-not-use-native-pcie-pme-by-default.patch +Patch387: pci-fall-back-to-original-bios-bar-addresses.patch + +Patch390: linux-2.6-defaults-acpi-video.patch +Patch391: linux-2.6-acpi-video-dos.patch +Patch392: linux-2.6-acpi-video-export-edid.patch +Patch393: acpi-ec-add-delay-before-write.patch + +Patch450: linux-2.6-input-kill-stupid-messages.patch +Patch452: linux-2.6.30-no-pcspkr-modalias.patch +Patch453: thinkpad-acpi-add-x100e.patch +Patch454: thinkpad-acpi-fix-backlight.patch + +Patch460: linux-2.6-serial-460800.patch + +Patch470: die-floppy-die.patch + +Patch510: linux-2.6-silence-noise.patch +Patch520: pci-change-error-messages-to-kern-info.patch +Patch530: linux-2.6-silence-fbcon-logo.patch +Patch570: linux-2.6-selinux-mprotect-checks.patch +Patch580: linux-2.6-sparc-selinux-mprotect-checks.patch + +Patch600: 
linux-2.6-acpi-sleep-live-sci-live.patch + +Patch610: hda_intel-prealloc-4mb-dmabuffer.patch + +Patch681: linux-2.6-mac80211-age-scan-results-on-resume.patch + +Patch690: iwlwifi-add-internal-short-scan-support-for-3945.patch +Patch691: iwlwifi-Recover-TX-flow-stall-due-to-stuck-queue.patch +Patch692: iwlwifi-move-plcp-check-to-separated-function.patch +Patch693: iwlwifi-Recover-TX-flow-failure.patch +Patch694: iwlwifi-code-cleanup-for-connectivity-recovery.patch +Patch695: iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch +Patch698: iwlwifi-recover_from_tx_stall.patch + +Patch800: linux-2.6-crash-driver.patch + +Patch900: linux-2.6-cantiga-iommu-gfx.patch + +# crypto/ +Patch1200: crypto-add-async-hash-testing.patch + +Patch1515: lirc-2.6.33.patch +Patch1517: hdpvr-ir-enable.patch + +# virt + ksm patches +Patch1550: virtqueue-wrappers.patch +Patch1554: virt_console-rollup.patch +Patch1555: fix_xen_guest_on_old_EC2.patch + +# DRM +Patch1800: drm-next.patch +Patch1801: drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch +Patch1802: revert-drm-kms-toggle-poll-around-switcheroo.patch +Patch1803: drm-encoder-disable.patch +# nouveau + drm fixes +Patch1815: drm-nouveau-updates.patch +Patch1819: drm-intel-big-hammer.patch +# intel drm is all merged upstream +Patch1820: drm-i915-fix-edp-panels.patch +Patch1821: i915-fix-crt-hotplug-regression.patch +Patch1824: drm-intel-next.patch +# make sure the lvds comes back on lid open +Patch1825: drm-intel-make-lvds-work.patch +Patch1830: drm-i915-fix-hibernate-memory-corruption.patch +Patch1831: drm-i915-add-reclaimable-to-page-allocations.patch +Patch1835: drm-i915-make-G4X-style-PLL-search-more-permissive.patch +Patch1836: drm-intel-945gm-stability-fixes.patch +Patch1900: linux-2.6-intel-iommu-igfx.patch +# radeon +Patch1910: drm-radeon-fix-shared-ddc-handling.patch + +# linux1394 git patches +Patch2200: linux-2.6-firewire-git-update.patch +Patch2201: linux-2.6-firewire-git-pending.patch + +Patch2400: 
linux-2.6-phylib-autoload.patch + +# Quiet boot fixes +# silence the ACPI blacklist code +Patch2802: linux-2.6-silence-acpi-blacklist.patch + +Patch2899: linux-2.6-v4l-dvb-fixes.patch +Patch2900: linux-2.6-v4l-dvb-update.patch +Patch2901: linux-2.6-v4l-dvb-experimental.patch +Patch2905: linux-2.6-v4l-dvb-gspca-fixes.patch +Patch2906: linux-2.6-v4l-dvb-uvcvideo-update.patch + +Patch2910: linux-2.6-v4l-dvb-add-lgdt3304-support.patch +Patch2911: linux-2.6-v4l-dvb-add-kworld-a340-support.patch + +# fs fixes + +Patch3012: btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch + + +# NFSv4 + +# VIA Nano / VX8xx updates + +# patches headed upstream +Patch12005: linux-2.6-input-hid-quirk-egalax.patch + +Patch12015: add-appleir-usb-driver.patch +Patch12016: disable-i8042-check-on-apple-mac.patch + +Patch12017: prevent-runtime-conntrack-changes.patch + +Patch12018: neuter_intel_microcode_load.patch + +Patch12019: linux-2.6-umh-refactor.patch +Patch12020: coredump-uid-pipe-check.patch + +Patch12030: ssb_check_for_sprom.patch + +Patch12035: quiet-prove_RCU-in-cgroups.patch + +Patch12040: iwlwifi-manage-QoS-by-mac-stack.patch +Patch12041: mac80211-do-not-wipe-out-old-supported-rates.patch +Patch12042: mac80211-explicitly-disable-enable-QoS.patch +Patch12043: mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch + +# iwlwifi: cancel scan watchdog in iwl_bg_abort_scan +Patch12050: iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch + +Patch12100: ata-generic-handle-new-mbp-with-mcp89.patch +Patch12110: ata-generic-implement-ata-gen-flags.patch + +Patch12200: x86-debug-send-sigtrap-for-user-icebp.patch +Patch12210: ethtool-fix-buffer-overflow.patch +Patch12220: sched-fix-over-scheduling-bug.patch +Patch12230: kbuild-fix-modpost-segfault.patch + +Patch12250: inotify-fix-inotify-oneshot-support.patch +Patch12260: inotify-send-IN_UNMOUNT-events.patch + +Patch12270: kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch + +# ACPI 
GPE enable/disable fixes, needed preparation for the powerdown fix +Patch12299: acpica-00-linux-2.6.git-0f849d2cc6863c7874889ea60a871fb71399dd3f.patch +Patch12300: acpica-01-linux-2.6.git-a997ab332832519c2e292db13f509e4360495a5a.patch +Patch12310: acpica-02-linux-2.6.git-e4e9a735991c80fb0fc1bd4a13a93681c3c17ce0.patch +Patch12320: acpica-03-linux-2.6.git-fd247447c1d94a79d5cfc647430784306b3a8323.patch +Patch12330: acpica-04-linux-2.6.git-c9a8bbb7704cbf515c0fc68970abbe4e91d68521.patch +Patch12340: acpica-05-linux-2.6.git-ce43ace02320a3fb9614ddb27edc3a8700d68b26.patch +Patch12350: acpica-06-linux-2.6.git-9d3c752de65dbfa6e522f1d666deb0ac152ef367.patch +# fix system powering back on after shutdown (#613239) +Patch12360: acpi-pm-do-not-enable-gpes-for-system-wakeup-in-advance.patch + +Patch12400: input-synaptics-relax-capability-id-checks-on-new-hardware.patch + +Patch12410: cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch + +Patch12420: usb-obey-the-sysfs-power-wakeup-setting.patch + +%endif + +BuildRoot: %{_tmppath}/kernel-%{KVERREL}-root + +%description +The kernel package contains the Linux kernel (vmlinuz), the core of any +Linux operating system. The kernel handles the basic functions +of the operating system: memory allocation, process allocation, device +input and output, etc. + + +%package doc +Summary: Various documentation bits found in the kernel source +Group: Documentation +%description doc +This package contains documentation files from the kernel +source. Various bits of information about the Linux kernel and the +device drivers shipped with it are documented in these files. + +You'll want to install this package if you need a reference to the +options that can be passed to Linux kernel modules at load time. 
+ + +%package headers +Summary: Header files for the Linux kernel for use by glibc +Group: Development/System +Obsoletes: glibc-kernheaders +Provides: glibc-kernheaders = 3.0-46 +%description headers +Kernel-headers includes the C header files that specify the interface +between the Linux kernel and userspace libraries and programs. The +header files define structures and constants that are needed for +building most standard programs and are also needed for rebuilding the +glibc package. + +%package firmware +Summary: Firmware files used by the Linux kernel +Group: Development/System +# This is... complicated. +# Look at the WHENCE file. +License: GPL+ and GPLv2+ and MIT and Redistributable, no modification permitted +%if "x%{?variant}" != "x" +Provides: kernel-firmware = %{rpmversion}-%{pkg_release} +%endif +%description firmware +Kernel-firmware includes firmware files required for some devices to +operate. + +%package bootwrapper +Summary: Boot wrapper files for generating combined kernel + initrd images +Group: Development/System +Requires: gzip +%description bootwrapper +Kernel-bootwrapper contains the wrapper code which makes bootable "zImage" +files combining both kernel and initial ramdisk. + +%package debuginfo-common-%{_target_cpu} +Summary: Kernel source files used by %{name}-debuginfo packages +Group: Development/Debug +%description debuginfo-common-%{_target_cpu} +This package is required by %{name}-debuginfo subpackages. +It provides the kernel source files common to all builds. + +%package -n perf +Summary: Performance monitoring for the Linux kernel +Group: Development/System +License: GPLv2 +%description -n perf +This package provides the supporting documentation for the perf tool +shipped in each kernel image subpackage. + +# +# This macro creates a kernel--debuginfo package. 
+# %%kernel_debuginfo_package +# +%define kernel_debuginfo_package() \ +%package %{?1:%{1}-}debuginfo\ +Summary: Debug information for package %{name}%{?1:-%{1}}\ +Group: Development/Debug\ +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}\ +Provides: %{name}%{?1:-%{1}}-debuginfo-%{_target_cpu} = %{version}-%{release}\ +AutoReqProv: no\ +%description -n %{name}%{?1:-%{1}}-debuginfo\ +This package provides debug information for package %{name}%{?1:-%{1}}.\ +This is required to use SystemTap with %{name}%{?1:-%{1}}-%{KVERREL}.\ +%{expand:%%global debuginfo_args %{?debuginfo_args} -p '/.*/%%{KVERREL}%{?1:\.%{1}}/.*|/.*%%{KVERREL}%{?1:\.%{1}}(\.debug)?' -o debuginfo%{?1}.list}\ +%{nil} + +# +# This macro creates a kernel--devel package. +# %%kernel_devel_package +# +%define kernel_devel_package() \ +%package %{?1:%{1}-}devel\ +Summary: Development package for building kernel modules to match the %{?2:%{2} }kernel\ +Group: System Environment/Kernel\ +Provides: kernel%{?1:-%{1}}-devel-%{_target_cpu} = %{version}-%{release}\ +Provides: kernel-devel-%{_target_cpu} = %{version}-%{release}%{?1:.%{1}}\ +Provides: kernel-devel = %{version}-%{release}%{?1:.%{1}}\ +Provides: kernel-devel-uname-r = %{KVERREL}%{?1:.%{1}}\ +AutoReqProv: no\ +Requires(pre): /usr/bin/find\ +%description -n kernel%{?variant}%{?1:-%{1}}-devel\ +This package provides kernel headers and makefiles sufficient to build modules\ +against the %{?2:%{2} }kernel package.\ +%{nil} + +# +# This macro creates a kernel- and its -devel and -debuginfo too. +# %%define variant_summary The Linux kernel compiled for +# %%kernel_variant_package [-n ] +# +%define kernel_variant_package(n:) \ +%package %1\ +Summary: %{variant_summary}\ +Group: System Environment/Kernel\ +%kernel_reqprovconf\ +%{expand:%%kernel_devel_package %1 %{!?-n:%1}%{?-n:%{-n*}}}\ +%{expand:%%kernel_debuginfo_package %1}\ +%{nil} + + +# First the auxiliary packages of the main kernel package. 
+%kernel_devel_package +%kernel_debuginfo_package + + +# Now, each variant package. + +%define variant_summary The Linux kernel compiled for SMP machines +%kernel_variant_package -n SMP smp +%description smp +This package includes a SMP version of the Linux kernel. It is +required only on machines with two or more CPUs as well as machines with +hyperthreading technology. + +Install the kernel-smp package if your machine uses two or more CPUs. + + +%define variant_summary The Linux kernel compiled for PAE capable machines +%kernel_variant_package PAE +%description PAE +This package includes a version of the Linux kernel with support for up to +64GB of high memory. It requires a CPU with Physical Address Extensions (PAE). +The non-PAE kernel can only address up to 4GB of memory. +Install the kernel-PAE package if your machine has more than 4GB of memory. + + +%define variant_summary The Linux kernel compiled with extra debugging enabled for PAE capable machines +%kernel_variant_package PAEdebug +Obsoletes: kernel-PAE-debug +%description PAEdebug +This package includes a version of the Linux kernel with support for up to +64GB of high memory. It requires a CPU with Physical Address Extensions (PAE). +The non-PAE kernel can only address up to 4GB of memory. +Install the kernel-PAE package if your machine has more than 4GB of memory. + +This variant of the kernel has numerous debugging options enabled. +It should only be installed when trying to gather additional information +on kernel bugs, as some of these options impact performance noticably. + + +%define variant_summary The Linux kernel compiled with extra debugging enabled +%kernel_variant_package debug +%description debug +The kernel package contains the Linux kernel (vmlinuz), the core of any +Linux operating system. The kernel handles the basic functions +of the operating system: memory allocation, process allocation, device +input and output, etc. 
+ +This variant of the kernel has numerous debugging options enabled. +It should only be installed when trying to gather additional information +on kernel bugs, as some of these options impact performance noticably. + + +%prep +# do a few sanity-checks for --with *only builds +%if %{with_baseonly} +%if !%{with_up}%{with_pae} +echo "Cannot build --with baseonly, up build is disabled" +exit 1 +%endif +%endif + +%if %{with_smponly} +%if !%{with_smp} +echo "Cannot build --with smponly, smp build is disabled" +exit 1 +%endif +%endif + +# more sanity checking; do it quietly +if [ "%{patches}" != "%%{patches}" ] ; then + for patch in %{patches} ; do + if [ ! -f $patch ] ; then + echo "ERROR: Patch ${patch##/*/} listed in specfile but is missing" + exit 1 + fi + done +fi 2>/dev/null + +patch_command='patch -p1 -F1 -s' +ApplyPatch() +{ + local patch=$1 + shift + if [ ! -f $RPM_SOURCE_DIR/$patch ]; then + exit 1 + fi +%if !%{using_upstream_branch} + if ! egrep "^Patch[0-9]+: $patch\$" %{_specdir}/${RPM_PACKAGE_NAME%%%%%{?variant}}.spec ; then + if [ "${patch:0:10}" != "patch-2.6." ] ; then + echo "ERROR: Patch $patch not listed as a source patch in specfile" + exit 1 + fi + fi 2>/dev/null +%endif + case "$patch" in + *.bz2) bunzip2 < "$RPM_SOURCE_DIR/$patch" | $patch_command ${1+"$@"} ;; + *.gz) gunzip < "$RPM_SOURCE_DIR/$patch" | $patch_command ${1+"$@"} ;; + *) $patch_command ${1+"$@"} < "$RPM_SOURCE_DIR/$patch" ;; + esac +} + +# don't apply patch if it's empty +ApplyOptionalPatch() +{ + local patch=$1 + shift + if [ ! 
-f $RPM_SOURCE_DIR/$patch ]; then + exit 1 + fi + local C=$(wc -l $RPM_SOURCE_DIR/$patch | awk '{print $1}') + if [ "$C" -gt 9 ]; then + ApplyPatch $patch ${1+"$@"} + fi +} + +# we don't want a .config file when building firmware: it just confuses the build system +%define build_firmware \ + mv .config .config.firmware_save \ + make INSTALL_FW_PATH=$RPM_BUILD_ROOT/lib/firmware firmware_install \ + mv .config.firmware_save .config + +# First we unpack the kernel tarball. +# If this isn't the first make prep, we use links to the existing clean tarball +# which speeds things up quite a bit. + +# Update to latest upstream. +%if 0%{?released_kernel} +%define vanillaversion 2.6.%{base_sublevel} +# non-released_kernel case +%else +%if 0%{?rcrev} +%define vanillaversion 2.6.%{upstream_sublevel}-rc%{rcrev} +%if 0%{?gitrev} +%define vanillaversion 2.6.%{upstream_sublevel}-rc%{rcrev}-git%{gitrev} +%endif +%else +# pre-{base_sublevel+1}-rc1 case +%if 0%{?gitrev} +%define vanillaversion 2.6.%{base_sublevel}-git%{gitrev} +%else +%define vanillaversion 2.6.%{base_sublevel} +%endif +%endif +%endif + +# We can share hardlinked source trees by putting a list of +# directory names of the CVS checkouts that we want to share +# with in .shared-srctree. (Full pathnames are required.) +[ -f .shared-srctree ] && sharedirs=$(cat .shared-srctree) + +if [ ! -d kernel-%{kversion}/vanilla-%{vanillaversion} ]; then + + if [ -d kernel-%{kversion}/vanilla-%{kversion} ]; then + + cd kernel-%{kversion} + + # Any vanilla-* directories other than the base one are stale. + for dir in vanilla-*; do + [ "$dir" = vanilla-%{kversion} ] || rm -rf $dir & + done + + else + + # Ok, first time we do a make prep. + rm -f pax_global_header + for sharedir in $sharedirs ; do + if [[ ! -z $sharedir && -d $sharedir/kernel-%{kversion}/vanilla-%{kversion} ]] ; then + break + fi + done + if [[ ! 
-z $sharedir && -d $sharedir/kernel-%{kversion}/vanilla-%{kversion} ]] ; then +%setup -q -n kernel-%{kversion} -c -T + cp -rl $sharedir/kernel-%{kversion}/vanilla-%{kversion} . + else +%setup -q -n kernel-%{kversion} -c + mv linux-%{kversion} vanilla-%{kversion} + fi + + fi + +%if "%{kversion}" != "%{vanillaversion}" + + for sharedir in $sharedirs ; do + if [[ ! -z $sharedir && -d $sharedir/kernel-%{kversion}/vanilla-%{vanillaversion} ]] ; then + break + fi + done + if [[ ! -z $sharedir && -d $sharedir/kernel-%{kversion}/vanilla-%{vanillaversion} ]] ; then + + cp -rl $sharedir/kernel-%{kversion}/vanilla-%{vanillaversion} . + + else + + cp -rl vanilla-%{kversion} vanilla-%{vanillaversion} + cd vanilla-%{vanillaversion} + +# Update vanilla to the latest upstream. +# (non-released_kernel case only) +%if 0%{?rcrev} + ApplyPatch patch-2.6.%{upstream_sublevel}-rc%{rcrev}.bz2 +%if 0%{?gitrev} + ApplyPatch patch-2.6.%{upstream_sublevel}-rc%{rcrev}-git%{gitrev}.bz2 +%endif +%else +# pre-{base_sublevel+1}-rc1 case +%if 0%{?gitrev} + ApplyPatch patch-2.6.%{base_sublevel}-git%{gitrev}.bz2 +%endif +%endif + + cd .. + + fi + +%endif + +else + # We already have a vanilla dir. + cd kernel-%{kversion} +fi + +if [ -d linux-%{kversion}.%{_target_cpu} ]; then + # Just in case we ctrl-c'd a prep already + rm -rf deleteme.%{_target_cpu} + # Move away the stale away, and delete in background. + mv linux-%{kversion}.%{_target_cpu} deleteme.%{_target_cpu} + rm -rf deleteme.%{_target_cpu} & +fi + +cp -rl vanilla-%{vanillaversion} linux-%{kversion}.%{_target_cpu} + +cd linux-%{kversion}.%{_target_cpu} + +# released_kernel with possible stable updates +%if 0%{?stable_base} +ApplyPatch %{stable_patch_00} +%endif +%if 0%{?stable_rc} +ApplyPatch %{stable_patch_01} +%endif + +%if %{using_upstream_branch} +### BRANCH APPLY ### +%endif + +# Drop some necessary files from the source dir into the buildroot +cp $RPM_SOURCE_DIR/config-* . +cp %{SOURCE15} . 
+ +# Dynamically generate kernel .config files from config-* files +make -f %{SOURCE20} VERSION=%{version} configs + +#if a rhel kernel, apply the rhel config options +%if 0%{?rhel} + for i in %{all_arch_configs} + do + mv $i $i.tmp + ./merge.pl config-rhel-generic $i.tmp > $i + rm $i.tmp + done +%endif + +ApplyOptionalPatch git-linus.diff + +# This patch adds a "make nonint_oldconfig" which is non-interactive and +# also gives a list of missing options at the end. Useful for automated +# builds (as used in the buildsystem). +ApplyPatch linux-2.6-build-nonintconfig.patch + +ApplyPatch linux-2.6-makefile-after_link.patch + +# +# misc small stuff to make things compile +# +ApplyOptionalPatch linux-2.6-compile-fixes.patch + +%if !%{nopatches} + +# revert patches from upstream that conflict or that we get via other means +ApplyOptionalPatch linux-2.6-upstream-reverts.patch -R + +ApplyOptionalPatch git-bluetooth.patch +ApplyOptionalPatch git-cpufreq.patch + +ApplyPatch linux-2.6-hotfixes.patch + +# Roland's utrace ptrace replacement. 
+ApplyPatch linux-2.6-tracehook.patch +ApplyPatch linux-2.6-utrace.patch +ApplyPatch linux-2.6-utrace-ptrace.patch + +# Architecture patches +# x86(-64) +ApplyPatch linux-2.6-x86-cfi_sections.patch + +# +# Intel IOMMU +# + +# +# PowerPC +# +# Provide modalias in sysfs for vio devices +ApplyPatch linux-2.6-vio-modalias.patch + +# +# SPARC64 +# +ApplyPatch linux-2.6.29-sparc-IOC_TYPECHECK.patch + +# +# Exec shield +# +ApplyPatch linux-2.6-execshield.patch + +# +# bugfixes to drivers and filesystems +# + +# ext4 + +# xfs + +# btrfs +ApplyPatch btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch + + +# eCryptfs + +# NFSv4 + +# USB +#ApplyPatch linux-2.6-driver-level-usb-autosuspend.diff +#ApplyPatch linux-2.6-enable-btusb-autosuspend.patch +#ApplyPatch linux-2.6-usb-uvc-autosuspend.diff +#ApplyPatch linux-2.6-fix-btusb-autosuspend.patch +ApplyPatch linux-2.6-usb-wwan-update.patch + +# WMI + +# ACPI +ApplyPatch linux-2.6-defaults-acpi-video.patch +ApplyPatch linux-2.6-acpi-video-dos.patch +ApplyPatch linux-2.6-acpi-video-export-edid.patch +ApplyPatch acpi-ec-add-delay-before-write.patch + +# Various low-impact patches to aid debugging. 
+ApplyPatch linux-2.6-debug-sizeof-structs.patch +ApplyPatch linux-2.6-debug-nmi-timeout.patch +ApplyPatch linux-2.6-debug-taint-vm.patch +ApplyPatch linux-2.6-debug-vm-would-have-oomkilled.patch +ApplyPatch linux-2.6-debug-always-inline-kzalloc.patch + +# +# PCI +# +# make default state of PCI MSI a config option +ApplyPatch linux-2.6-defaults-pci_no_msi.patch +# enable ASPM by default on hardware we expect to work +ApplyPatch linux-2.6-defaults-aspm.patch +# disable aspm if acpi doesn't provide an _OSC method +ApplyPatch pci-acpi-disable-aspm-if-no-osc.patch +# allow drivers to disable aspm at load time +ApplyPatch pci-aspm-dont-enable-too-early.patch +# stop PCIe hotplug interrupt storm (#613412) +ApplyPatch pci-pm-do-not-use-native-pcie-pme-by-default.patch +# fall back to original BIOS address when reassignment fails (KORG#16263) +ApplyPatch pci-fall-back-to-original-bios-bar-addresses.patch + +# +# SCSI Bits. +# + +# ACPI +ApplyPatch linux-2.6-acpi-sleep-live-sci-live.patch + +# ALSA +ApplyPatch hda_intel-prealloc-4mb-dmabuffer.patch + +# Networking + +# Misc fixes +# The input layer spews crap no-one cares about. +ApplyPatch linux-2.6-input-kill-stupid-messages.patch + +# stop floppy.ko from autoloading during udev... +ApplyPatch die-floppy-die.patch + +ApplyPatch linux-2.6.30-no-pcspkr-modalias.patch + +ApplyPatch linux-2.6-input-hid-quirk-egalax.patch +ApplyPatch thinkpad-acpi-add-x100e.patch +ApplyPatch thinkpad-acpi-fix-backlight.patch + +# Allow to use 480600 baud on 16C950 UARTs +ApplyPatch linux-2.6-serial-460800.patch + +# Silence some useless messages that still get printed with 'quiet' +ApplyPatch linux-2.6-silence-noise.patch +ApplyPatch pci-change-error-messages-to-kern-info.patch + +# Make fbcon not show the penguins with 'quiet' +ApplyPatch linux-2.6-silence-fbcon-logo.patch + +# Fix the SELinux mprotect checks on executable mappings +#ApplyPatch linux-2.6-selinux-mprotect-checks.patch +# Fix SELinux for sparc +# FIXME: Can we drop this now? 
See updated linux-2.6-selinux-mprotect-checks.patch +#ApplyPatch linux-2.6-sparc-selinux-mprotect-checks.patch + +# Changes to upstream defaults. + + +# back-port scan result aging patches +#ApplyPatch linux-2.6-mac80211-age-scan-results-on-resume.patch + +# /dev/crash driver. +ApplyPatch linux-2.6-crash-driver.patch + +# Cantiga chipset b0rkage +ApplyPatch linux-2.6-cantiga-iommu-gfx.patch + +# crypto/ + +# Add async hash testing (a8f1a05) +ApplyPatch crypto-add-async-hash-testing.patch + +# http://www.lirc.org/ +ApplyPatch lirc-2.6.33.patch +# enable IR receiver on Hauppauge HD PVR (v4l-dvb merge pending) +ApplyPatch hdpvr-ir-enable.patch + +# Assorted Virt Fixes +ApplyPatch virtqueue-wrappers.patch +ApplyPatch virt_console-rollup.patch +ApplyPatch fix_xen_guest_on_old_EC2.patch + +ApplyPatch drm-next.patch +ApplyPatch drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch +ApplyPatch revert-drm-kms-toggle-poll-around-switcheroo.patch +ApplyPatch drm-i915-fix-edp-panels.patch +ApplyPatch i915-fix-crt-hotplug-regression.patch +# RHBZ#572799 +ApplyPatch drm-i915-make-G4X-style-PLL-search-more-permissive.patch +ApplyPatch drm-intel-945gm-stability-fixes.patch +ApplyPatch drm-encoder-disable.patch + +# Nouveau DRM + drm fixes +ApplyPatch drm-nouveau-updates.patch + +ApplyPatch drm-intel-big-hammer.patch +ApplyOptionalPatch drm-intel-next.patch +ApplyPatch drm-intel-make-lvds-work.patch + +# radeon fixes +ApplyPatch drm-radeon-fix-shared-ddc-handling.patch + +# hibernation memory corruption fixes +ApplyPatch drm-i915-fix-hibernate-memory-corruption.patch +ApplyPatch drm-i915-add-reclaimable-to-page-allocations.patch + +ApplyPatch linux-2.6-intel-iommu-igfx.patch + +# linux1394 git patches +ApplyOptionalPatch linux-2.6-firewire-git-update.patch +ApplyOptionalPatch linux-2.6-firewire-git-pending.patch + +# silence the ACPI blacklist code +ApplyPatch linux-2.6-silence-acpi-blacklist.patch + +# V4L/DVB updates/fixes/experimental drivers +# apply if non-empty 
+ApplyOptionalPatch linux-2.6-v4l-dvb-fixes.patch +ApplyOptionalPatch linux-2.6-v4l-dvb-update.patch +ApplyOptionalPatch linux-2.6-v4l-dvb-experimental.patch + +ApplyPatch linux-2.6-v4l-dvb-gspca-fixes.patch +ApplyPatch linux-2.6-v4l-dvb-uvcvideo-update.patch + +ApplyPatch linux-2.6-v4l-dvb-add-lgdt3304-support.patch +ApplyPatch linux-2.6-v4l-dvb-add-kworld-a340-support.patch + +ApplyPatch linux-2.6-phylib-autoload.patch + +# Patches headed upstream +ApplyPatch add-appleir-usb-driver.patch +ApplyPatch disable-i8042-check-on-apple-mac.patch + +ApplyPatch neuter_intel_microcode_load.patch + +# Refactor UserModeHelper code & satisfy abrt recursion check request +#ApplyPatch linux-2.6-umh-refactor.patch +#ApplyPatch coredump-uid-pipe-check.patch + +# rhbz#533746 +ApplyPatch ssb_check_for_sprom.patch + +# iwlwifi fixes from F-13-2.6.33 +ApplyPatch iwlwifi-add-internal-short-scan-support-for-3945.patch +ApplyPatch iwlwifi-Recover-TX-flow-stall-due-to-stuck-queue.patch +ApplyPatch iwlwifi-move-plcp-check-to-separated-function.patch +ApplyPatch iwlwifi-Recover-TX-flow-failure.patch +ApplyPatch iwlwifi-code-cleanup-for-connectivity-recovery.patch +ApplyPatch iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch +ApplyPatch iwlwifi-recover_from_tx_stall.patch + +# mac80211/iwlwifi fix connections to some APs (rhbz#558002) +ApplyPatch mac80211-explicitly-disable-enable-QoS.patch +ApplyPatch iwlwifi-manage-QoS-by-mac-stack.patch +ApplyPatch mac80211-do-not-wipe-out-old-supported-rates.patch +ApplyPatch mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch + +# iwlwifi: cancel scan watchdog in iwl_bg_abort_scan +ApplyPatch iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch + +ApplyPatch quiet-prove_RCU-in-cgroups.patch + +# BZ#608034 +ApplyPatch ata-generic-handle-new-mbp-with-mcp89.patch +ApplyPatch ata-generic-implement-ata-gen-flags.patch + +# BZ#609548 +ApplyPatch x86-debug-send-sigtrap-for-user-icebp.patch + +# CVE-2010-2478 +ApplyPatch 
ethtool-fix-buffer-overflow.patch + +# fix performance problem with CGROUPS +ApplyPatch sched-fix-over-scheduling-bug.patch + +# fix modpost segfault during kernel build (#595915) +ApplyPatch kbuild-fix-modpost-segfault.patch + +# fix broken oneshot support and missing umount events (#607327) +ApplyPatch inotify-fix-inotify-oneshot-support.patch +ApplyPatch inotify-send-IN_UNMOUNT-events.patch + +# 610911 +ApplyPatch kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch + +# ACPI GPE enable/disable fixes, needed preparation for the powerdown fix +ApplyPatch acpica-00-linux-2.6.git-0f849d2cc6863c7874889ea60a871fb71399dd3f.patch +ApplyPatch acpica-01-linux-2.6.git-a997ab332832519c2e292db13f509e4360495a5a.patch +ApplyPatch acpica-02-linux-2.6.git-e4e9a735991c80fb0fc1bd4a13a93681c3c17ce0.patch +ApplyPatch acpica-03-linux-2.6.git-fd247447c1d94a79d5cfc647430784306b3a8323.patch +ApplyPatch acpica-04-linux-2.6.git-c9a8bbb7704cbf515c0fc68970abbe4e91d68521.patch +ApplyPatch acpica-05-linux-2.6.git-ce43ace02320a3fb9614ddb27edc3a8700d68b26.patch +ApplyPatch acpica-06-linux-2.6.git-9d3c752de65dbfa6e522f1d666deb0ac152ef367.patch +# fix system powering back on after shutdown (#613239) +ApplyPatch acpi-pm-do-not-enable-gpes-for-system-wakeup-in-advance.patch + +# fix newer synaptics touchpads not being recognized +ApplyPatch input-synaptics-relax-capability-id-checks-on-new-hardware.patch + +# CVE-2010-2524 +ApplyPatch cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch + +# restore ability of USB remotes to wake the machine (#617559) +ApplyPatch usb-obey-the-sysfs-power-wakeup-setting.patch + +# END OF PATCH APPLICATIONS + +%endif + +# Any further pre-build tree manipulations happen here. 
+ +chmod +x scripts/checkpatch.pl + +# only deal with configs if we are going to build for the arch +%ifnarch %nobuildarches + +mkdir configs + +# Remove configs not for the buildarch +for cfg in kernel-%{version}-*.config; do + if [ `echo %{all_arch_configs} | grep -c $cfg` -eq 0 ]; then + rm -f $cfg + fi +done + +%if !%{debugbuildsenabled} +rm -f kernel-%{version}-*debug.config +%endif + +# now run oldconfig over all the config files +for i in *.config +do + mv $i .config + Arch=`head -1 .config | cut -b 3-` + make ARCH=$Arch %{oldconfig_target} > /dev/null + echo "# $Arch" > configs/$i + cat .config >> configs/$i +done +# end of kernel config +%endif + +# get rid of unwanted files resulting from patch fuzz +find . \( -name "*.orig" -o -name "*~" \) -exec rm -f {} \; >/dev/null + +cd .. + +### +### build +### +%build + +%if %{with_sparse} +%define sparse_mflags C=1 +%endif + +%if %{fancy_debuginfo} +# This override tweaks the kernel makefiles so that we run debugedit on an +# object before embedding it. When we later run find-debuginfo.sh, it will +# run debugedit again. The edits it does change the build ID bits embedded +# in the stripped object, but repeating debugedit is a no-op. We do it +# beforehand to get the proper final build ID bits into the embedded image. +# This affects the vDSO images in vmlinux, and the vmlinux image in bzImage. +export AFTER_LINK=\ +'sh -xc "/usr/lib/rpm/debugedit -b $$RPM_BUILD_DIR -d /usr/src/debug \ + -i $@ > $@.id"' +%endif + +cp_vmlinux() +{ + eu-strip --remove-comment -o "$2" "$1" +} + +BuildKernel() { + MakeTarget=$1 + KernelImage=$2 + Flavour=$3 + InstallName=${4:-vmlinuz} + + # Pick the right config file for the kernel we're building + Config=kernel-%{version}-%{_target_cpu}${Flavour:+-${Flavour}}.config + DevelDir=/usr/src/kernels/%{KVERREL}${Flavour:+.${Flavour}} + + # When the bootable image is just the ELF kernel, strip it. + # We already copy the unstripped file into the debuginfo package. 
+ if [ "$KernelImage" = vmlinux ]; then + CopyKernel=cp_vmlinux + else + CopyKernel=cp + fi + + KernelVer=%{version}-%{release}.%{_target_cpu}${Flavour:+.${Flavour}} + echo BUILDING A KERNEL FOR ${Flavour} %{_target_cpu}... + + # make sure EXTRAVERSION says what we want it to say + perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = %{?stablerev}-%{release}.%{_target_cpu}${Flavour:+.${Flavour}}/" Makefile + + # if pre-rc1 devel kernel, must fix up SUBLEVEL for our versioning scheme + %if !0%{?rcrev} + %if 0%{?gitrev} + perl -p -i -e 's/^SUBLEVEL.*/SUBLEVEL = %{upstream_sublevel}/' Makefile + %endif + %endif + + # and now to start the build process + + make -s mrproper + cp configs/$Config .config + + Arch=`head -1 .config | cut -b 3-` + echo USING ARCH=$Arch + + make -s ARCH=$Arch %{oldconfig_target} > /dev/null + make -s ARCH=$Arch V=1 %{?_smp_mflags} $MakeTarget %{?sparse_mflags} + make -s ARCH=$Arch V=1 %{?_smp_mflags} modules %{?sparse_mflags} || exit 1 + +%if %{with_perftool} + pushd tools/perf +# make sure the scripts are executable... won't be in tarball until 2.6.31 :/ + chmod +x util/generate-cmdlist.sh util/PERF-VERSION-GEN + make -s V=1 NO_DEMANGLE=1 %{?_smp_mflags} perf + mkdir -p $RPM_BUILD_ROOT/usr/libexec/ + install -m 755 perf $RPM_BUILD_ROOT/usr/libexec/perf.$KernelVer + popd +%endif + + # Start installing the results +%if %{with_debuginfo} + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/boot + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/%{image_install_path} +%endif + mkdir -p $RPM_BUILD_ROOT/%{image_install_path} + install -m 644 .config $RPM_BUILD_ROOT/boot/config-$KernelVer + install -m 644 System.map $RPM_BUILD_ROOT/boot/System.map-$KernelVer + + # We estimate the size of the initramfs because rpm needs to take this size + # into consideration when performing disk space calculations. 
(See bz #530778) + dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initramfs-$KernelVer.img bs=1M count=20 + + if [ -f arch/$Arch/boot/zImage.stub ]; then + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/%{image_install_path}/zImage.stub-$KernelVer || : + fi + $CopyKernel $KernelImage \ + $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + chmod 755 $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer + # Override $(mod-fw) because we don't want it to install any firmware + # We'll do that ourselves with 'make firmware_install' + make -s ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT modules_install KERNELRELEASE=$KernelVer mod-fw= +%ifarch %{vdso_arches} + make -s ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT vdso_install KERNELRELEASE=$KernelVer + if [ ! -s ldconfig-kernel.conf ]; then + echo > ldconfig-kernel.conf "\ +# Placeholder file, no vDSO hwcap entries used in this kernel." + fi + %{__install} -D -m 444 ldconfig-kernel.conf \ + $RPM_BUILD_ROOT/etc/ld.so.conf.d/kernel-$KernelVer.conf +%endif + + # And save the headers/makefiles etc for building modules against + # + # This all looks scary, but the end result is supposed to be: + # * all arch relevant include/ files + # * all Makefile/Kconfig files + # * all script/ files + + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/source + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + (cd $RPM_BUILD_ROOT/lib/modules/$KernelVer ; ln -s build source) + # dirs for additional modules per module-init-tools, kbuild/modules.txt + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/extra + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/updates + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/weak-updates + # first copy everything + cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp Module.symvers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + 
cp System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + if [ -s Module.markers ]; then + cp Module.markers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + fi + # then drop all but the needed Makefiles/Kconfig files + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Documentation + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include + cp .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp -a scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + if [ -d arch/$Arch/scripts ]; then + cp -a arch/$Arch/scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch} || : + fi + if [ -f arch/$Arch/*lds ]; then + cp -a arch/$Arch/*lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch}/ || : + fi + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*.o + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*/*.o +%ifarch ppc + cp -a --parents arch/powerpc/lib/crtsavres.[So] $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + if [ -d arch/%{asmarch}/include ]; then + cp -a --parents arch/%{asmarch}/include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi + cp -a include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include + + # Make sure the Makefile and version.h have a matching timestamp so that + # external modules can be built + touch -r $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Makefile $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/linux/version.h + touch -r $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/.config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/linux/autoconf.h + # Copy .config to include/config/auto.conf so "make prepare" is unnecessary. + cp $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/.config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/config/auto.conf + + if test -s vmlinux.id; then + cp vmlinux.id $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/vmlinux.id + else + echo >&2 "*** WARNING *** no vmlinux build ID! 
***" + fi + + # + # save the vmlinux file for kernel debugging into the kernel-debuginfo rpm + # +%if %{with_debuginfo} + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer + cp vmlinux $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer +%endif + + find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name "*.ko" -type f >modnames + + # mark modules executable so that strip-to-file can strip them + xargs --no-run-if-empty chmod u+x < modnames + + # Generate a list of modules for block and networking. + + fgrep /drivers/ modnames | xargs --no-run-if-empty nm -upA | + sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef + + collect_modules_list() + { + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + } + + collect_modules_list networking \ + 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register' + collect_modules_list block \ + 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler' + collect_modules_list drm \ + 'drm_open|drm_init' + collect_modules_list modesetting \ + 'drm_crtc_init' + + # detect missing or incorrect license tags + rm -f modinfo + while read i + do + echo -n "${i#$RPM_BUILD_ROOT/lib/modules/$KernelVer/} " >> modinfo + /sbin/modinfo -l $i >> modinfo + done < modnames + + egrep -v \ + 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' \ + modinfo && exit 1 + + rm -f modinfo modnames + + # remove files that will be auto generated by depmod at rpm -i time + for i in alias alias.bin ccwmap dep dep.bin ieee1394map inputmap isapnpmap ofmap pcimap seriomap symbols symbols.bin usbmap + do + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$i + done + + # Move the devel headers out of the root file system + mkdir -p $RPM_BUILD_ROOT/usr/src/kernels + mv $RPM_BUILD_ROOT/lib/modules/$KernelVer/build $RPM_BUILD_ROOT/$DevelDir + ln -sf 
../../..$DevelDir $RPM_BUILD_ROOT/lib/modules/$KernelVer/build +} + +### +# DO it... +### + +# prepare directories +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/boot + +cd linux-%{kversion}.%{_target_cpu} + +%if %{with_debug} +BuildKernel %make_target %kernel_image debug +%endif + +%if %{with_pae_debug} +BuildKernel %make_target %kernel_image PAEdebug +%endif + +%if %{with_pae} +BuildKernel %make_target %kernel_image PAE +%endif + +%if %{with_up} +BuildKernel %make_target %kernel_image +%endif + +%if %{with_smp} +BuildKernel %make_target %kernel_image smp +%endif + +%if %{with_doc} +# Make the HTML and man pages. +make %{?_smp_mflags} htmldocs mandocs || %{doc_build_fail} + +# sometimes non-world-readable files sneak into the kernel source tree +chmod -R a=rX Documentation +find Documentation -type d | xargs chmod u+w +%endif + +%if %{with_perf} +pushd tools/perf +make %{?_smp_mflags} man || %{doc_build_fail} +popd +%endif + +### +### Special hacks for debuginfo subpackages. +### + +# This macro is used by %%install, so we must redefine it before that. +%define debug_package %{nil} + +%if %{fancy_debuginfo} +%define __debug_install_post \ + /usr/lib/rpm/find-debuginfo.sh %{debuginfo_args} %{_builddir}/%{?buildsubdir}\ +%{nil} +%endif + +%if %{with_debuginfo} +%ifnarch noarch +%global __debug_package 1 +%files -f debugfiles.list debuginfo-common-%{_target_cpu} +%defattr(-,root,root) +%endif +%endif + +### +### install +### + +%install + +cd linux-%{kversion}.%{_target_cpu} + +%if %{with_doc} +docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{rpmversion} +man9dir=$RPM_BUILD_ROOT%{_datadir}/man/man9 + +# copy the source over +mkdir -p $docdir +tar -f - --exclude=man --exclude='.*' -c Documentation | tar xf - -C $docdir + +# Install man pages for the kernel API. 
+mkdir -p $man9dir +find Documentation/DocBook/man -name '*.9.gz' -print0 | +xargs -0 --no-run-if-empty %{__install} -m 444 -t $man9dir $m +ls $man9dir | grep -q '' || > $man9dir/BROKEN +%endif # with_doc + +# perf docs +%if %{with_perf} +mandir=$RPM_BUILD_ROOT%{_datadir}/man +man1dir=$mandir/man1 +pushd tools/perf/Documentation +make install-man mandir=$mandir +popd + +pushd $man1dir +for d in *.1; do + gzip $d; +done +popd +%endif # with_perf + +# perf shell wrapper +%if %{with_perf} +mkdir -p $RPM_BUILD_ROOT/usr/sbin/ +cp $RPM_SOURCE_DIR/perf $RPM_BUILD_ROOT/usr/sbin/perf +chmod 0755 $RPM_BUILD_ROOT/usr/sbin/perf +mkdir -p $RPM_BUILD_ROOT%{_datadir}/doc/perf +%endif + +%if %{with_headers} +# Install kernel headers +make ARCH=%{hdrarch} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install + +# Do headers_check but don't die if it fails. +make ARCH=%{hdrarch} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_check \ + > hdrwarnings.txt || : +if grep -q exist hdrwarnings.txt; then + sed s:^$RPM_BUILD_ROOT/usr/include/:: hdrwarnings.txt + # Temporarily cause a build failure if header inconsistencies. + # exit 1 +fi + +find $RPM_BUILD_ROOT/usr/include \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) | xargs rm -f + +# glibc provides scsi headers for itself, for now +rm -rf $RPM_BUILD_ROOT/usr/include/scsi +rm -f $RPM_BUILD_ROOT/usr/include/asm*/atomic.h +rm -f $RPM_BUILD_ROOT/usr/include/asm*/io.h +rm -f $RPM_BUILD_ROOT/usr/include/asm*/irq.h +%endif + +%if %{with_firmware} +%{build_firmware} +%endif + +%if %{with_bootwrapper} +make DESTDIR=$RPM_BUILD_ROOT bootwrapper_install WRAPPER_OBJDIR=%{_libdir}/kernel-wrapper WRAPPER_DTSDIR=%{_libdir}/kernel-wrapper/dts +%endif + + +### +### clean +### + +%clean +rm -rf $RPM_BUILD_ROOT + +### +### scripts +### + +# +# This macro defines a %%post script for a kernel*-devel package. 
+# %%kernel_devel_post [] +# +%define kernel_devel_post() \ +%{expand:%%post %{?1:%{1}-}devel}\ +if [ -f /etc/sysconfig/kernel ]\ +then\ + . /etc/sysconfig/kernel || exit $?\ +fi\ +if [ "$HARDLINK" != "no" -a -x /usr/sbin/hardlink ]\ +then\ + (cd /usr/src/kernels/%{KVERREL}%{?1:.%{1}} &&\ + /usr/bin/find . -type f | while read f; do\ + hardlink -c /usr/src/kernels/*.fc*.*/$f $f\ + done)\ +fi\ +%{nil} + +# This macro defines a %%posttrans script for a kernel package. +# %%kernel_variant_posttrans [] +# More text can follow to go at the end of this variant's %%post. +# +%define kernel_variant_posttrans() \ +%{expand:%%posttrans %{?1}}\ +/sbin/new-kernel-pkg --package kernel%{?-v:-%{-v*}} --mkinitrd --dracut --depmod --update %{KVERREL}%{?-v:.%{-v*}} || exit $?\ +/sbin/new-kernel-pkg --package kernel%{?1:-%{1}} --rpmposttrans %{KVERREL}%{?1:.%{1}} || exit $?\ +%{nil} + +# +# This macro defines a %%post script for a kernel package and its devel package. +# %%kernel_variant_post [-v ] [-r ] +# More text can follow to go at the end of this variant's %%post. +# +%define kernel_variant_post(v:r:) \ +%{expand:%%kernel_devel_post %{?-v*}}\ +%{expand:%%kernel_variant_posttrans %{?-v*}}\ +%{expand:%%post %{?-v*}}\ +%{-r:\ +if [ `uname -i` == "x86_64" -o `uname -i` == "i386" ] &&\ + [ -f /etc/sysconfig/kernel ]; then\ + /bin/sed -r -i -e 's/^DEFAULTKERNEL=%{-r*}$/DEFAULTKERNEL=kernel%{?-v:-%{-v*}}/' /etc/sysconfig/kernel || exit $?\ +fi}\ +%{expand:\ +/sbin/new-kernel-pkg --package kernel%{?-v:-%{-v*}} --install %{KVERREL}%{?-v:.%{-v*}} || exit $?\ +}\ +#if [ -x /sbin/weak-modules ]\ +#then\ +# /sbin/weak-modules --add-kernel %{KVERREL}%{?-v*} || exit $?\ +#fi\ +%{nil} + +# +# This macro defines a %%preun script for a kernel package. 
+# %%kernel_variant_preun +# +%define kernel_variant_preun() \ +%{expand:%%preun %{?1}}\ +/sbin/new-kernel-pkg --rminitrd --rmmoddep --remove %{KVERREL}%{?1:.%{1}} || exit $?\ +#if [ -x /sbin/weak-modules ]\ +#then\ +# /sbin/weak-modules --remove-kernel %{KVERREL}%{?1} || exit $?\ +#fi\ +%{nil} + +%kernel_variant_preun +%kernel_variant_post -r kernel-smp + +%kernel_variant_preun smp +%kernel_variant_post -v smp + +%kernel_variant_preun PAE +%kernel_variant_post -v PAE -r (kernel|kernel-smp) + +%kernel_variant_preun debug +%kernel_variant_post -v debug + +%kernel_variant_post -v PAEdebug -r (kernel|kernel-smp) +%kernel_variant_preun PAEdebug + +if [ -x /sbin/ldconfig ] +then + /sbin/ldconfig -X || exit $? +fi + +### +### file lists +### + +%if %{with_headers} +%files headers +%defattr(-,root,root) +/usr/include/* +%endif + +%if %{with_firmware} +%files firmware +%defattr(-,root,root) +/lib/firmware/* +%doc linux-%{kversion}.%{_target_cpu}/firmware/WHENCE +%endif + +%if %{with_bootwrapper} +%files bootwrapper +%defattr(-,root,root) +/usr/sbin/* +%{_libdir}/kernel-wrapper +%endif + +# only some architecture builds need kernel-doc +%if %{with_doc} +%files doc +%defattr(-,root,root) +%{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation/* +%dir %{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation +%dir %{_datadir}/doc/kernel-doc-%{rpmversion} +%{_datadir}/man/man9/* +%endif + +%if %{with_perf} +%files -n perf +%defattr(-,root,root) +%{_datadir}/doc/perf +/usr/sbin/perf +%{_datadir}/man/man1/* +%endif + +# This is %{image_install_path} on an arch where that includes ELF files, +# or empty otherwise. +%define elf_image_install_path %{?kernel_image_elf:%{image_install_path}} + +# +# This macro defines the %%files sections for a kernel package +# and its devel and debuginfo packages. 
+# %%kernel_variant_files [-k vmlinux] +# +%define kernel_variant_files(k:) \ +%if %{1}\ +%{expand:%%files %{?2}}\ +%defattr(-,root,root)\ +/%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:.%{2}}\ +/boot/System.map-%{KVERREL}%{?2:.%{2}}\ +%if %{with_perftool}\ +/usr/libexec/perf.%{KVERREL}%{?2:.%{2}}\ +%endif\ +#/boot/symvers-%{KVERREL}%{?2:.%{2}}.gz\ +/boot/config-%{KVERREL}%{?2:.%{2}}\ +%dir /lib/modules/%{KVERREL}%{?2:.%{2}}\ +/lib/modules/%{KVERREL}%{?2:.%{2}}/kernel\ +/lib/modules/%{KVERREL}%{?2:.%{2}}/build\ +/lib/modules/%{KVERREL}%{?2:.%{2}}/source\ +/lib/modules/%{KVERREL}%{?2:.%{2}}/extra\ +/lib/modules/%{KVERREL}%{?2:.%{2}}/updates\ +/lib/modules/%{KVERREL}%{?2:.%{2}}/weak-updates\ +%ifarch %{vdso_arches}\ +/lib/modules/%{KVERREL}%{?2:.%{2}}/vdso\ +/etc/ld.so.conf.d/kernel-%{KVERREL}%{?2:.%{2}}.conf\ +%endif\ +/lib/modules/%{KVERREL}%{?2:.%{2}}/modules.*\ +%ghost /boot/initramfs-%{KVERREL}%{?2:.%{2}}.img\ +%{expand:%%files %{?2:%{2}-}devel}\ +%defattr(-,root,root)\ +%verify(not mtime) /usr/src/kernels/%{KVERREL}%{?2:.%{2}}\ +/usr/src/kernels/%{KVERREL}%{?2:.%{2}}\ +%if %{with_debuginfo}\ +%ifnarch noarch\ +%if %{fancy_debuginfo}\ +%{expand:%%files -f debuginfo%{?2}.list %{?2:%{2}-}debuginfo}\ +%else\ +%{expand:%%files %{?2:%{2}-}debuginfo}\ +%endif\ +%defattr(-,root,root)\ +%if !%{fancy_debuginfo}\ +%if "%{elf_image_install_path}" != ""\ +%{debuginfodir}/%{elf_image_install_path}/*-%{KVERREL}%{?2:.%{2}}.debug\ +%endif\ +%{debuginfodir}/lib/modules/%{KVERREL}%{?2:.%{2}}\ +%{debuginfodir}/usr/src/kernels/%{KVERREL}%{?2:.%{2}}\ +%endif\ +%endif\ +%endif\ +%endif\ +%{nil} + + +%kernel_variant_files %{with_up} +%kernel_variant_files %{with_smp} smp +%kernel_variant_files %{with_debug} debug +%kernel_variant_files %{with_pae} PAE +%kernel_variant_files %{with_pae_debug} PAEdebug + + +%changelog +* Mon Jul 26 2010 Chuck Ebbert 2.6.34.1-30 +- usb-obey-the-sysfs-power-wakeup-setting.patch: + Restore ability of USB devices to wake the machine 
(#617559) + +* Thu Jul 22 2010 Chuck Ebbert 2.6.34.1-29 +- cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch: + Fix a malicious redirect problem in the DNS lookup code (CVE-2010-2524) + +* Thu Jul 22 2010 Chuck Ebbert 2.6.34.1-28 +- input-synaptics-relax-capability-id-checks-on-new-hardware.patch: + Make mouse driver recognize newer synaptics hardware as touchpad. + +* Thu Jul 22 2010 Chuck Ebbert 2.6.34.1-27 +- ACPI GPE enable/disable patches: fix system powering back on + after shutdown (#613239) (and possibly #615858) + +* Thu Jul 22 2010 Jerome Glisse 2.6.34.1-26 +- radeon fix shared ddc handling (#593429) + +* Thu Jul 22 2010 Chuck Ebbert 2.6.34.1-25 +- kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch: + Fix crash in guest Python programs (#610911) + +* Wed Jul 21 2010 Chuck Ebbert 2.6.34.1-24 +- Drop crypto-aesni-kill-module_alias.patch; bug #571577 should + not be present in 2.6.34. + +* Wed Jul 21 2010 Dave Airlie 2.6.34.1-23 +- drm-intel-945gm-stability-fixes.patch: fix 945GM stability issues + +* Wed Jul 21 2010 Dave Airlie 2.6.34.1-22 +- double drop: its a revert on top of a revert. + +* Tue Jul 20 2010 Dave Airlie 2.6.34.1-21 +- drop drm revert, that can't possible cause the bug, but is causing another one. + +* Mon Jul 19 2010 Chuck Ebbert 2.6.34.1-20 +- pci-fall-back-to-original-bios-bar-addresses.patch: + Fix 2.6.34 problems with assigning PCI addresses (KORG#16263) + +* Mon Jul 19 2010 Chuck Ebbert 2.6.34.1-19 +- drm-i915-add-reclaimable-to-page-allocations.patch: + Additional fix for hibernation memory corruption bugs. 
+ +* Sun Jul 18 2010 Chuck Ebbert 2.6.34.1-18 +- drm-i915-make-G4X-style-PLL-search-more-permissive.patch (#572799) + +* Sun Jul 18 2010 Hans de Goede 2.6.34.1-17 +- Fix inotify-fix-inotify-oneshot-support.patch so that it compiles +- Various small updates / fixes to the uvcvideo driver: + - Support dynamic menu controls (#576023) + - Fix the apple iSight camera not working (#600998) + +* Fri Jul 16 2010 Chuck Ebbert 2.6.34.1-16 +- inotify-fix-inotify-oneshot-support.patch, + inotify-send-IN_UNMOUNT-events.patch: + Fix broken oneshot support and missing umount events. (#607327) + +* Fri Jul 16 2010 Ben Skeggs 2.6.34.1-15 +- nouveau: fix lvds regression (#601002) +- nouveau: bring back acpi edid support, with fixes (#613284) +- nouveau: remove dcb1.5 quirk that breaks things (#595645) + +* Wed Jul 14 2010 Chuck Ebbert 2.6.34.1-14 +- Truncate the obsolete git bluetooth and firewire patches, use + ApplyOptionalPatch for bluetooth, cpufreq and firewire patches. + +* Wed Jul 14 2010 Chuck Ebbert 2.6.34.1-12 +- pci-pm-do-not-use-native-pcie-pme-by-default.patch: + fix PCIe hotplug interrupts firing continuously. (#613412) +- Update pci-acpi-disable-aspm-if-no-osc.patch so it works + with the above patch. +- Drop linux-2.6-defaults-pciehp.patch: pciehp_passive mode + does not exist anymore. + +* Tue Jul 13 2010 Ben Skeggs 2.6.34.1-11 +- nouveau: bring back patches lost from 2.6.34 update + add some more to + fix at least rhbz#532711 and rhbz#593046 +- remove patches relating to nouveau that are now unused + +* Mon Jul 12 2010 Dave Jones +- Remove a bunch of x86 options from config files that get set + automatically, and can't be overridden. + +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-9 +- crypto-aesni-kill-module_alias.patch: kill MODULE_ALIAS to prevent + aesni-intel from autoloading. 
+ +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-8 +- iwlwifi: cancel scan watchdog in iwl_bg_abort_scan (#590436) + +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-7 +- Restore PowerPC VIO modalias patch; use the upstream version. +- Drop Mac G5 thermal shutdown patch, now upstream. + +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-6 +- Fix modpost segfault when building kernels. (#595915) + +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-5 +- pci-change-error-messages-to-kern-info.patch: + Use new upstream patch to silence more useless messages. + +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-4 +- sched-fix-over-scheduling-bug.patch: fix scheduler bug with CGROUPS + +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-3 +- ethtool-fix-buffer-overflow.patch (CVE-2010-2478) + +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-2 +- Copy fix for BZ#609548 from F-13 2.6.33 kernel. + +* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-1 +- Initial commit of 2.6.34 for F-13 +- Previous history is in the branch private-f14-2_6_34 + +* Wed Jul 07 2010 Chuck Ebbert +- pci-acpi-disable-aspm-if-no-osc.patch, pci-aspm-dont-enable-too-early.patch + PCI layer fixes for problems with hardware that doesn't support ASPM. 
+ +* Wed Jul 07 2010 Chuck Ebbert +- attempt to fix hibernate on Intel GPUs (kernel.org #13811) (RHBZ#537494) + +* Wed Jul 07 2010 Chuck Ebbert +- Let ata_generic handle SATA interface on new MacBook Pro (#608034) + +* Tue Jul 06 2010 Chuck Ebbert +- Re-enable options: DYNAMIC_FTRACE, FUNCTION_TRACER and STACK_TRACER + +* Tue Jul 06 2010 Chuck Ebbert +- Linux 2.6.34.1 + +* Thu Jul 01 2010 Chuck Ebbert +- Linux 2.6.34.1-rc1 +- Drop patches merged upstream: + btrfs-should-add-permission-check-for-setfacl.patch (CVE-2010-2071) + iwlwifi-recalculate-average-tpt-if-not-current.patch + iwlwifi-fix-internal-scan-race.patch +- Revert DRM patches we already have: + drm-i915-rebind-bo-if-currently-bound-with-incorrect-alignment.patch + drm-radeon-fix-the-r100-r200-ums-block-0-page-fix.patch + drm-radeon-r100-r200-ums-block-ability-for-userspace-app-to-trash-0-page-and-beyond.patch + drm-radeon-kms-atom-fix-typo-in-lvds-panel-info-parsing.patch + drm-radeon-kms-reset-ddc_bus-in-object-header-parsing.patch + drm-edid-fix-1024x768-85hz.patch + drm-i915-reject-bind_to_gtt-early-if-object-aperture.patch + drm-i915-fix-82854-pci-id-and-treat-it-like-other-85x.patch +- Revert broken -stable patch: + perf-fix-endianness-argument-compatibility-with-opt_boolean-and-introduce-opt_incr.patch + +* Wed Jun 30 2010 Kyle McMartin +- Disable MRST on x86 here as well. + +* Tue Jun 29 2010 Kyle McMartin +- i915-fix-crt-hotplug-regression.patch: copy from rawhide. + +* Mon Jun 28 2010 Chuck Ebbert +- ppc64: enable active memory sharing and DLPAR memory remove (#607175) + +* Mon Jun 28 2010 Chuck Ebbert +- Copy fix for BZ#220892 from F-13. + +* Fri Jun 25 2010 Kyle McMartin +- drm-i915-fix-edp-panels.patch: copy from rawhide. + +* Mon Jun 21 2010 Dave Jones +- Disable workaround for obscure SMP pentium pro errata. + I miss the 1990s too, but it's time to move on. + If anyone actually needs this it would be better done using + the apply_alternatives infrastructure. 
+ +* Mon Jun 21 2010 Kyle McMartin +- drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch + Revert eb1f8e4f, bisected by Nicolas Kaiser. Thanks! (rhbz#599190) + (If this works, will try to root-cause.) +- rebase previous patch on top of above reversion + +* Mon Jun 21 2010 Kyle McMartin +- revert-drm-kms-toggle-poll-around-switcheroo.patch (rhbz#599190) + +* Thu Jun 17 2010 Kyle McMartin +- Suck in patch from Dave Miller in 2.6.35 to add async hash testing, + hopefully fixes error from previous commit. (But making it modular + is still a good idea.) + +* Thu Jun 17 2010 Kyle McMartin +- make ghash-clmulni modular to get rid of early boot noise (rhbz#586954) + (not a /fix/ but it should at least quiet boot down a bit if you have + the cpu support) + +* Wed Jun 16 2010 Kyle McMartin +- Snag some more DRM commits into drm-next.patch that I missed the first + time. +- Fix up radeon_pm toggle to work with the upstream code. + +* Tue Jun 15 2010 Prarit Bhargava +- Turn off CONFIG_I2O on x86. + It is broken on 64-bit address spaces (i686/PAE, x86_64), and frankly, I'm + having trouble finding anyone who actually uses it. + +* Tue Jun 15 2010 Kyle McMartin +- Fix build by nuking superfluous "%{expand" which was missing a + trailing '}'. You may now reward me with an array of alcoholic + beverages, I so richly deserve for spending roughly a full + day staring at the diff of the spec. + +* Mon Jun 14 2010 Kyle McMartin +- btrfs ACL fixes from CVE-2010-2071. + +* Sun Jun 13 2010 Kyle McMartin +- remunge and reapply hdpvr-ir-enable + +* Sun Jun 13 2010 Kyle McMartin +- mac80211/iwlwifi fix connections to some APs (rhbz#558002) + patches from sgruszka@. + +* Sun Jun 13 2010 Kyle McMartin +- Provide a knob to enable radeon_pm to allow users to test + that functionality. Add radeon.pm=1 to your kernel cmdline + in order to enable it. (It still defaults to off though.) + +* Sun Jun 13 2010 Kyle McMartin +- Update drm-next to include fixes since 2.6.35-rc1. 
+ +* Fri Jun 11 2010 Justin M. Forbes +- Disable xsave for so that kernel will boot on ancient EC2 hosts. + +* Wed Jun 09 2010 John W. Linville +- Disable rt20xx and rt35xx chipset support in rt2800 drivers (#570869) + +* Wed Jun 09 2010 David Woodhouse +- Include PHY modules in modules.networking (#602155) + + +### +# The following Emacs magic makes C-c C-e use UTC dates. +# Local Variables: +# rpm-change-log-uses-utc: t +# End: +### diff --git a/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch b/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch new file mode 100644 index 000000000..eefdda5ce --- /dev/null +++ b/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch @@ -0,0 +1,49 @@ +From: Xiao Guangrong +Date: Wed, 30 Jun 2010 08:02:45 +0000 (+0800) +Subject: KVM: MMU: fix conflict access permissions in direct sp +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=6aa0b9dec5d6dde26ea17b0b5be8fccfe19df3c9 + +KVM: MMU: fix conflict access permissions in direct sp + +In no-direct mapping, we mark sp is 'direct' when we mapping the +guest's larger page, but its access is encoded form upper page-struct +entire not include the last mapping, it will cause access conflict. + +For example, have this mapping: + [W] + / PDE1 -> |---| + P[W] | | LPA + \ PDE2 -> |---| + [R] + +P have two children, PDE1 and PDE2, both PDE1 and PDE2 mapping the +same lage page(LPA). The P's access is WR, PDE1's access is WR, +PDE2's access is RO(just consider read-write permissions here) + +When guest access PDE1, we will create a direct sp for LPA, the sp's +access is from P, is W, then we will mark the ptes is W in this sp. + +Then, guest access PDE2, we will find LPA's shadow page, is the same as +PDE's, and mark the ptes is RO. + +So, if guest access PDE1, the incorrect #PF is occured. 
+ +Fixed by encode the last mapping access into direct shadow page + +Signed-off-by: Xiao Guangrong +Signed-off-by: Marcelo Tosatti +Signed-off-by: Avi Kivity +--- + +diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h +index 89d66ca..2331bdc 100644 +--- a/arch/x86/kvm/paging_tmpl.h ++++ b/arch/x86/kvm/paging_tmpl.h +@@ -342,6 +342,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, + /* advance table_gfn when emulating 1gb pages with 4k */ + if (delta == 0) + table_gfn += PT_INDEX(addr, level); ++ access &= gw->pte_access; + } else { + direct = 0; + table_gfn = gw->table_gfn[level - 2]; diff --git a/linux-2.6-acpi-sleep-live-sci-live.patch b/linux-2.6-acpi-sleep-live-sci-live.patch new file mode 100644 index 000000000..5d4239c69 --- /dev/null +++ b/linux-2.6-acpi-sleep-live-sci-live.patch @@ -0,0 +1,51 @@ +commit 7ba0dea4158155a68b833982199691dbc2d4e6dc +Author: Matthew Garrett +Date: Mon Apr 19 16:51:39 2010 -0400 + + acpi: Fall back to manually changing SCI_EN + + The ACPI spec tells us that the ACPI SCI_EN bit is under hardware control + and shouldn't be touched by the OS. It seems that the Leading Other OS + ignores this and some machines expect this behaviour. We have a blacklist + for these, but given that we're able to detect the failure case and the + alternative to breaking the spec is letting the machine crash and burn, + let's try falling back when we know the alternative is a mostly-dead + machine. 
+ + Signed-off-by: Matthew Garrett + +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c +index f74834a..79df8d4 100644 +--- a/drivers/acpi/sleep.c ++++ b/drivers/acpi/sleep.c +@@ -227,6 +227,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state) + static int acpi_suspend_enter(suspend_state_t pm_state) + { + acpi_status status = AE_OK; ++ acpi_status enable_status = AE_OK; + unsigned long flags = 0; + u32 acpi_state = acpi_target_sleep_state; + +@@ -254,10 +255,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state) + } + + /* If ACPI is not enabled by the BIOS, we need to enable it here. */ +- if (set_sci_en_on_resume) ++ if (!set_sci_en_on_resume) ++ enable_status = acpi_enable(); ++ ++ if (set_sci_en_on_resume || enable_status == AE_NO_HARDWARE_RESPONSE) ++ /* If we're still in legacy mode then we have a problem. The ++ * spec tells us that this bit is under hardware control, but ++ * there's no plausible way that the OS can transition back to ++ * legacy mode so our choices here are to either ignore the ++ * spec or crash and burn horribly. The latter doesn't seem ++ * like it's ever going to be the preferable choice, so let's ++ * live dangerously. 
++ */ + acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); +- else +- acpi_enable(); + + /* Reprogram control registers and execute _BFS */ + acpi_leave_sleep_state_prep(acpi_state); diff --git a/linux-2.6-acpi-video-dos.patch b/linux-2.6-acpi-video-dos.patch new file mode 100644 index 000000000..3e2085193 --- /dev/null +++ b/linux-2.6-acpi-video-dos.patch @@ -0,0 +1,17 @@ +Disable firmware video brightness change on AC/Battery switch by default + +-- mjg59 + +diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c +index bac2901..93b1a9e 100644 +--- a/drivers/acpi/video.c ++++ b/drivers/acpi/video.c +@@ -1818,7 +1818,7 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video) + + static int acpi_video_bus_start_devices(struct acpi_video_bus *video) + { +- return acpi_video_bus_DOS(video, 0, 0); ++ return acpi_video_bus_DOS(video, 0, 1); + } + + static int acpi_video_bus_stop_devices(struct acpi_video_bus *video) diff --git a/linux-2.6-acpi-video-export-edid.patch b/linux-2.6-acpi-video-export-edid.patch new file mode 100644 index 000000000..52ddd878f --- /dev/null +++ b/linux-2.6-acpi-video-export-edid.patch @@ -0,0 +1,199 @@ +From 023f5b2d1fdad6ffe33a204a4e76e38edba9d9e5 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Thu, 20 May 2010 08:59:58 -0400 +Subject: linux-2.6-acpi-video-export-edid.patch + +--- + drivers/acpi/video.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++---- + include/acpi/video.h | 16 +++++++ + 2 files changed, 118 insertions(+), 8 deletions(-) + +diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c +index a0c93b3..4b8bda1 100644 +--- a/drivers/acpi/video.c ++++ b/drivers/acpi/video.c +@@ -45,6 +45,7 @@ + #include + #include + #include ++#include + + #define PREFIX "ACPI: " + +@@ -65,11 +66,6 @@ + + #define MAX_NAME_LEN 20 + +-#define ACPI_VIDEO_DISPLAY_CRT 1 +-#define ACPI_VIDEO_DISPLAY_TV 2 +-#define ACPI_VIDEO_DISPLAY_DVI 3 +-#define ACPI_VIDEO_DISPLAY_LCD 4 +- + #define _COMPONENT ACPI_VIDEO_COMPONENT + 
ACPI_MODULE_NAME("video"); + +@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id + } + + static int ++acpi_video_get_device_type(struct acpi_video_bus *video, ++ unsigned long device_id) ++{ ++ struct acpi_video_enumerated_device *ids; ++ int i; ++ ++ for (i = 0; i < video->attached_count; i++) { ++ ids = &video->attached_array[i]; ++ if ((ids->value.int_val & 0xffff) == device_id) ++ return ids->value.int_val; ++ } ++ ++ return 0; ++} ++ ++static int + acpi_video_bus_get_one_device(struct acpi_device *device, + struct acpi_video_bus *video) + { + unsigned long long device_id; +- int status; ++ int status, device_type; + struct acpi_video_device *data; + struct acpi_video_device_attrib* attribute; + +@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device, + } + if(attribute->bios_can_detect) + data->flags.bios = 1; +- } else +- data->flags.unknown = 1; ++ } else { ++ /* Check for legacy IDs */ ++ device_type = acpi_video_get_device_type(video, ++ device_id); ++ /* Ignore bits 16 and 18-20 */ ++ switch (device_type & 0xffe2ffff) { ++ case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR: ++ data->flags.crt = 1; ++ break; ++ case ACPI_VIDEO_DISPLAY_LEGACY_PANEL: ++ data->flags.lcd = 1; ++ break; ++ case ACPI_VIDEO_DISPLAY_LEGACY_TV: ++ data->flags.tvout = 1; ++ break; ++ default: ++ data->flags.unknown = 1; ++ } ++ } + + acpi_video_device_bind(video, data); + acpi_video_device_find_cap(data); +@@ -2032,6 +2061,71 @@ out: + return result; + } + ++int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, ++ void **edid) ++{ ++ struct acpi_video_bus *video; ++ struct acpi_video_device *video_device; ++ union acpi_object *buffer = NULL; ++ acpi_status status; ++ int i, length; ++ ++ if (!device || !acpi_driver_data(device)) ++ return -EINVAL; ++ ++ video = acpi_driver_data(device); ++ ++ for (i = 0; i < video->attached_count; i++) { ++ video_device = video->attached_array[i].bind_info; ++ 
length = 256; ++ ++ if (!video_device) ++ continue; ++ ++ if (type) { ++ switch (type) { ++ case ACPI_VIDEO_DISPLAY_CRT: ++ if (!video_device->flags.crt) ++ continue; ++ break; ++ case ACPI_VIDEO_DISPLAY_TV: ++ if (!video_device->flags.tvout) ++ continue; ++ break; ++ case ACPI_VIDEO_DISPLAY_DVI: ++ if (!video_device->flags.dvi) ++ continue; ++ break; ++ case ACPI_VIDEO_DISPLAY_LCD: ++ if (!video_device->flags.lcd) ++ continue; ++ break; ++ } ++ } else if (video_device->device_id != device_id) { ++ continue; ++ } ++ ++ status = acpi_video_device_EDID(video_device, &buffer, length); ++ ++ if (ACPI_FAILURE(status) || !buffer || ++ buffer->type != ACPI_TYPE_BUFFER) { ++ length = 128; ++ status = acpi_video_device_EDID(video_device, &buffer, ++ length); ++ if (ACPI_FAILURE(status) || !buffer || ++ buffer->type != ACPI_TYPE_BUFFER) { ++ continue; ++ } ++ } ++ ++ *edid = buffer->buffer.pointer; ++ return length; ++ } ++ ++ return -ENODEV; ++} ++EXPORT_SYMBOL(acpi_video_get_edid); ++ + static int + acpi_video_bus_get_devices(struct acpi_video_bus *video, + struct acpi_device *device) +diff --git a/include/acpi/video.h b/include/acpi/video.h +index cf7be3d..551793c 100644 +--- a/include/acpi/video.h ++++ b/include/acpi/video.h +@@ -1,12 +1,28 @@ + #ifndef __ACPI_VIDEO_H + #define __ACPI_VIDEO_H + ++#define ACPI_VIDEO_DISPLAY_CRT 1 ++#define ACPI_VIDEO_DISPLAY_TV 2 ++#define ACPI_VIDEO_DISPLAY_DVI 3 ++#define ACPI_VIDEO_DISPLAY_LCD 4 ++ ++#define ACPI_VIDEO_DISPLAY_LEGACY_MONITOR 0x0100 ++#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110 ++#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200 ++ + #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) + extern int acpi_video_register(void); + extern void acpi_video_unregister(void); ++extern int acpi_video_get_edid(struct acpi_device *device, int type, ++ int device_id, void **edid); + #else + static inline int acpi_video_register(void) { return 0; } + static inline void acpi_video_unregister(void) { return; } ++static 
inline int acpi_video_get_edid(struct acpi_device *device, int type, ++ int device_id, void **edid) ++{ ++ return -ENODEV; ++} + #endif + + #endif +-- +1.7.0.1 + diff --git a/linux-2.6-build-nonintconfig.patch b/linux-2.6-build-nonintconfig.patch new file mode 100644 index 000000000..e88e0ea1e --- /dev/null +++ b/linux-2.6-build-nonintconfig.patch @@ -0,0 +1,128 @@ +diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile +index 6d69c7c..ff84d12 100644 +--- a/scripts/kconfig/Makefile ++++ b/scripts/kconfig/Makefile +@@ -58,6 +58,11 @@ localyesconfig: $(obj)/streamline_config.pl $(obj)/conf + fi + $(Q)rm -f .tmp.config + ++nonint_oldconfig: $(obj)/conf ++ $< -b $(Kconfig) ++loose_nonint_oldconfig: $(obj)/conf ++ $< -B $(Kconfig) ++ + # Create new linux.pot file + # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files + # The symlink is used to repair a deficiency in arch/um +diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c +index 9960d1c..ac8d455 100644 +--- a/scripts/kconfig/conf.c ++++ b/scripts/kconfig/conf.c +@@ -23,6 +23,8 @@ enum { + ask_all, + ask_new, + ask_silent, ++ dont_ask, ++ dont_ask_dont_tell, + set_default, + set_yes, + set_mod, +@@ -360,7 +362,10 @@ static void conf(struct menu *menu) + + switch (prop->type) { + case P_MENU: +- if (input_mode == ask_silent && rootEntry != menu) { ++ if ((input_mode == ask_silent || ++ input_mode == dont_ask || ++ input_mode == dont_ask_dont_tell) && ++ rootEntry != menu) { + check_conf(menu); + return; + } +@@ -406,6 +411,8 @@ conf_childs: + indent -= 2; + } + ++static int return_value; ++ + static void check_conf(struct menu *menu) + { + struct symbol *sym; +@@ -418,12 +425,21 @@ static void check_conf(struct menu *menu) + if (sym && !sym_has_value(sym)) { + if (sym_is_changable(sym) || + (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) { ++ if (input_mode == dont_ask || ++ input_mode == dont_ask_dont_tell) { ++ if (input_mode == dont_ask && ++ sym->name && 
!sym_is_choice_value(sym)) { ++ fprintf(stderr,"CONFIG_%s\n",sym->name); ++ ++return_value; ++ } ++ } else { + if (!conf_cnt++) + printf(_("*\n* Restart config...\n*\n")); + rootEntry = menu_get_parent_menu(menu); + conf(rootEntry); + } + } ++ } + + for (child = menu->list; child; child = child->next) + check_conf(child); +@@ -439,7 +455,7 @@ int main(int ac, char **av) + bindtextdomain(PACKAGE, LOCALEDIR); + textdomain(PACKAGE); + +- while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) { ++ while ((opt = getopt(ac, av, "osbBdD:nmyrh")) != -1) { + switch (opt) { + case 'o': + input_mode = ask_silent; +@@ -448,6 +464,12 @@ int main(int ac, char **av) + input_mode = ask_silent; + sync_kconfig = 1; + break; ++ case 'b': ++ input_mode = dont_ask; ++ break; ++ case 'B': ++ input_mode = dont_ask_dont_tell; ++ break; + case 'd': + input_mode = set_default; + break; +@@ -525,6 +547,8 @@ int main(int ac, char **av) + case ask_silent: + case ask_all: + case ask_new: ++ case dont_ask: ++ case dont_ask_dont_tell: + conf_read(NULL); + break; + case set_no: +@@ -586,12 +610,16 @@ int main(int ac, char **av) + conf(&rootmenu); + input_mode = ask_silent; + /* fall through */ ++ case dont_ask: ++ case dont_ask_dont_tell: + case ask_silent: + /* Update until a loop caused no more changes */ + do { + conf_cnt = 0; + check_conf(&rootmenu); +- } while (conf_cnt); ++ } while (conf_cnt && ++ (input_mode != dont_ask && ++ input_mode != dont_ask_dont_tell)); + break; + } + +@@ -613,5 +641,5 @@ int main(int ac, char **av) + exit(1); + } + } +- return 0; ++ return return_value; + } diff --git a/linux-2.6-cantiga-iommu-gfx.patch b/linux-2.6-cantiga-iommu-gfx.patch new file mode 100644 index 000000000..a18e38ba9 --- /dev/null +++ b/linux-2.6-cantiga-iommu-gfx.patch @@ -0,0 +1,26 @@ +diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c +index 4173125..baa32a0 100644 +--- a/drivers/pci/intel-iommu.c ++++ b/drivers/pci/intel-iommu.c +@@ -340,7 +340,7 @@ int dmar_disabled = 0; + int 
dmar_disabled = 1; + #endif /*CONFIG_DMAR_DEFAULT_ON*/ + +-static int __initdata dmar_map_gfx = 1; ++static int dmar_map_gfx = 1; + static int dmar_forcedac; + static int intel_iommu_strict; + +@@ -3728,6 +3728,12 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) + */ + printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); + rwbf_quirk = 1; ++ ++ /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */ ++ if (dev->revision == 0x07) { ++ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); ++ dmar_map_gfx = 0; ++ } + } + + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); diff --git a/linux-2.6-compile-fixes.patch b/linux-2.6-compile-fixes.patch new file mode 100644 index 000000000..34c08ce47 --- /dev/null +++ b/linux-2.6-compile-fixes.patch @@ -0,0 +1,6 @@ +# +# Small compile fixes (For more involved fixes, please use a separate patch). +# +# Please add the errors from gcc before the diffs to save others having +# to do a compile to figure out what your diff is fixing. Thanks. 
+# diff --git a/linux-2.6-crash-driver.patch b/linux-2.6-crash-driver.patch new file mode 100644 index 000000000..7b518bb88 --- /dev/null +++ b/linux-2.6-crash-driver.patch @@ -0,0 +1,385 @@ +From df42d15cd28f468ecd4c30465b98a53cce90617c Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Tue, 30 Mar 2010 00:16:25 -0400 +Subject: dev-crash-driver.patch + +--- + arch/ia64/include/asm/crash.h | 90 +++++++++++++++++++++++++++++ + arch/ia64/kernel/ia64_ksyms.c | 3 + + arch/x86/include/asm/crash.h | 75 ++++++++++++++++++++++++ + arch/x86/mm/ioremap.c | 2 + + drivers/char/Kconfig | 2 + + drivers/char/Makefile | 2 + + drivers/char/crash.c | 128 +++++++++++++++++++++++++++++++++++++++++ + 7 files changed, 302 insertions(+), 0 deletions(-) + create mode 100644 arch/ia64/include/asm/crash.h + create mode 100644 arch/x86/include/asm/crash.h + create mode 100644 drivers/char/crash.c + +diff --git a/arch/ia64/include/asm/crash.h b/arch/ia64/include/asm/crash.h +new file mode 100644 +index 0000000..541af84 +--- /dev/null ++++ b/arch/ia64/include/asm/crash.h +@@ -0,0 +1,90 @@ ++#ifndef _ASM_IA64_CRASH_H ++#define _ASM_IA64_CRASH_H ++ ++/* ++ * linux/include/asm-ia64/crash.h ++ * ++ * Copyright (c) 2004 Red Hat, Inc. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ * ++ */ ++ ++#ifdef __KERNEL__ ++ ++#include ++#include ++#include ++ ++static inline void * ++map_virtual(u64 offset, struct page **pp) ++{ ++ struct page *page; ++ unsigned long pfn; ++ u32 type; ++ ++ if (REGION_NUMBER(offset) == 5) { ++ char byte; ++ ++ if (__get_user(byte, (char *)offset) == 0) ++ return (void *)offset; ++ else ++ return NULL; ++ } ++ ++ switch (type = efi_mem_type(offset)) ++ { ++ case EFI_LOADER_CODE: ++ case EFI_LOADER_DATA: ++ case EFI_BOOT_SERVICES_CODE: ++ case EFI_BOOT_SERVICES_DATA: ++ case EFI_CONVENTIONAL_MEMORY: ++ break; ++ ++ default: ++ printk(KERN_INFO ++ "crash memory driver: invalid memory type for %lx: %d\n", ++ offset, type); ++ return NULL; ++ } ++ ++ pfn = offset >> PAGE_SHIFT; ++ ++ if (!pfn_valid(pfn)) { ++ printk(KERN_INFO ++ "crash memory driver: invalid pfn: %lx )\n", pfn); ++ return NULL; ++ } ++ ++ page = pfn_to_page(pfn); ++ ++ if (!page->virtual) { ++ printk(KERN_INFO ++ "crash memory driver: offset: %lx page: %lx page->virtual: NULL\n", ++ offset, (unsigned long)page); ++ return NULL; ++ } ++ ++ return (page->virtual + (offset & (PAGE_SIZE-1))); ++} ++ ++static inline void unmap_virtual(struct page *page) ++{ ++ return; ++} ++ ++#endif /* __KERNEL__ */ ++ ++#endif /* _ASM_IA64_CRASH_H */ +diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c +index 7f4a0ed..552fe24 100644 +--- a/arch/ia64/kernel/ia64_ksyms.c ++++ b/arch/ia64/kernel/ia64_ksyms.c +@@ -84,6 +84,9 @@ EXPORT_SYMBOL(ia64_save_scratch_fpregs); + #include + EXPORT_SYMBOL(unw_init_running); + ++#include ++EXPORT_SYMBOL_GPL(efi_mem_type); ++ + #if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) + extern void esi_call_phys (void); + EXPORT_SYMBOL_GPL(esi_call_phys); +diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h +new file mode 100644 +index 0000000..dfcc006 +--- /dev/null ++++ b/arch/x86/include/asm/crash.h +@@ -0,0 +1,75 @@ ++#ifndef _ASM_I386_CRASH_H ++#define _ASM_I386_CRASH_H ++ ++/* ++ * 
linux/include/asm-i386/crash.h ++ * ++ * Copyright (c) 2004 Red Hat, Inc. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ */ ++ ++#ifdef __KERNEL__ ++ ++#include ++#include ++#include ++ ++extern int page_is_ram(unsigned long); ++ ++static inline void * ++map_virtual(u64 offset, struct page **pp) ++{ ++ struct page *page; ++ unsigned long pfn; ++ void *vaddr; ++ ++ pfn = (unsigned long)(offset >> PAGE_SHIFT); ++ ++ if (!page_is_ram(pfn)) { ++ printk(KERN_INFO ++ "crash memory driver: !page_is_ram(pfn: %lx)\n", pfn); ++ return NULL; ++ } ++ ++ if (!pfn_valid(pfn)) { ++ printk(KERN_INFO ++ "crash memory driver: invalid pfn: %lx )\n", pfn); ++ return NULL; ++ } ++ ++ page = pfn_to_page(pfn); ++ ++ vaddr = kmap(page); ++ if (!vaddr) { ++ printk(KERN_INFO ++ "crash memory driver: pfn: %lx kmap(page: %lx) failed\n", ++ pfn, (unsigned long)page); ++ return NULL; ++ } ++ ++ *pp = page; ++ return (vaddr + (offset & (PAGE_SIZE-1))); ++} ++ ++static inline void unmap_virtual(struct page *page) ++{ ++ kunmap(page); ++} ++ ++#endif /* __KERNEL__ */ ++ ++#endif /* _ASM_I386_CRASH_H */ +diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c +index 5eb1ba7..3e525d2 100644 +--- a/arch/x86/mm/ioremap.c ++++ b/arch/x86/mm/ioremap.c +@@ -24,6 +24,8 @@ + + #include "physaddr.h" + 
++EXPORT_SYMBOL_GPL(page_is_ram); ++ + /* + * Fix up the linear direct mapping of the kernel to avoid cache attribute + * conflicts. +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index 3141dd3..153658c 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -471,6 +471,8 @@ config LEGACY_PTYS + security. This option enables these legacy devices; on most + systems, it is safe to say N. + ++config CRASH ++ tristate "Crash Utility memory driver" + + config LEGACY_PTY_COUNT + int "Maximum number of legacy PTY in use" +diff --git a/drivers/char/Makefile b/drivers/char/Makefile +index f957edf..604c418 100644 +--- a/drivers/char/Makefile ++++ b/drivers/char/Makefile +@@ -111,6 +111,8 @@ obj-$(CONFIG_PS3_FLASH) += ps3flash.o + obj-$(CONFIG_JS_RTC) += js-rtc.o + js-rtc-y = rtc.o + ++obj-$(CONFIG_CRASH) += crash.o ++ + # Files generated that shall be removed upon make clean + clean-files := consolemap_deftbl.c defkeymap.c + +diff --git a/drivers/char/crash.c b/drivers/char/crash.c +new file mode 100644 +index 0000000..e5437de +--- /dev/null ++++ b/drivers/char/crash.c +@@ -0,0 +1,128 @@ ++/* ++ * linux/drivers/char/crash.c ++ * ++ * Copyright (C) 2004 Dave Anderson ++ * Copyright (C) 2004 Red Hat, Inc. ++ */ ++ ++/****************************************************************************** ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ *****************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define CRASH_VERSION "1.0" ++ ++/* ++ * These are the file operation functions that allow crash utility ++ * access to physical memory. ++ */ ++ ++static loff_t ++crash_llseek(struct file * file, loff_t offset, int orig) ++{ ++ switch (orig) { ++ case 0: ++ file->f_pos = offset; ++ return file->f_pos; ++ case 1: ++ file->f_pos += offset; ++ return file->f_pos; ++ default: ++ return -EINVAL; ++ } ++} ++ ++/* ++ * Determine the page address for an address offset value, ++ * get a virtual address for it, and copy it out. ++ * Accesses must fit within a page. ++ */ ++static ssize_t ++crash_read(struct file *file, char *buf, size_t count, loff_t *poff) ++{ ++ void *vaddr; ++ struct page *page; ++ u64 offset; ++ ssize_t read; ++ ++ offset = *poff; ++ if (offset >> PAGE_SHIFT != (offset+count-1) >> PAGE_SHIFT) ++ return -EINVAL; ++ ++ vaddr = map_virtual(offset, &page); ++ if (!vaddr) ++ return -EFAULT; ++ ++ if (copy_to_user(buf, vaddr, count)) { ++ unmap_virtual(page); ++ return -EFAULT; ++ } ++ unmap_virtual(page); ++ ++ read = count; ++ *poff += read; ++ return read; ++} ++ ++static struct file_operations crash_fops = { ++ .owner = THIS_MODULE, ++ .llseek = crash_llseek, ++ .read = crash_read, ++}; ++ ++static struct miscdevice crash_dev = { ++ MISC_DYNAMIC_MINOR, ++ "crash", ++ &crash_fops ++}; ++ ++static int __init ++crash_init(void) ++{ ++ int ret; ++ ++ ret = misc_register(&crash_dev); ++ if (ret) { ++ printk(KERN_ERR ++ "crash memory driver: cannot misc_register (MISC_DYNAMIC_MINOR)\n"); ++ goto out; ++ } ++ ++ ret = 0; ++ printk(KERN_INFO "crash memory driver: version %s\n", 
CRASH_VERSION); ++out: ++ return ret; ++} ++ ++static void __exit ++crash_cleanup_module(void) ++{ ++ misc_deregister(&crash_dev); ++} ++ ++module_init(crash_init); ++module_exit(crash_cleanup_module); ++ ++MODULE_LICENSE("GPL"); +-- +1.7.0.1 + diff --git a/linux-2.6-debug-always-inline-kzalloc.patch b/linux-2.6-debug-always-inline-kzalloc.patch new file mode 100644 index 000000000..24f665ca6 --- /dev/null +++ b/linux-2.6-debug-always-inline-kzalloc.patch @@ -0,0 +1,25 @@ +From 76ec0e2e6d6edf81abc0331d5e7873ef7b2f6019 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Wed, 8 Jul 2009 13:06:01 -0400 +Subject: [PATCH 6/6] fedora: linux-2.6-debug-always-inline-kzalloc.patch + +--- + include/linux/slab.h | 2 +- + 1 files changed, 1 insertions(+), 1 deletions(-) + +diff --git a/include/linux/slab.h b/include/linux/slab.h +index 2da8372..d4ef74f 100644 +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -310,7 +310,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) + * @size: how many bytes of memory are required. + * @flags: the type of memory to allocate (see kmalloc). 
+ */ +-static inline void *kzalloc(size_t size, gfp_t flags) ++static __always_inline void *kzalloc(size_t size, gfp_t flags) + { + return kmalloc(size, flags | __GFP_ZERO); + } +-- +1.6.2.5 + diff --git a/linux-2.6-debug-nmi-timeout.patch b/linux-2.6-debug-nmi-timeout.patch new file mode 100644 index 000000000..f54d26ae9 --- /dev/null +++ b/linux-2.6-debug-nmi-timeout.patch @@ -0,0 +1,45 @@ +From 542dee6f43067fa0101b53925aadf1d08c997cd4 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 29 Mar 2010 23:40:27 -0400 +Subject: linux-2.6-debug-nmi-timeout + +--- + arch/x86/kernel/apic/nmi.c | 2 +- + lib/Kconfig.debug | 8 ++++++++ + 2 files changed, 9 insertions(+), 1 deletions(-) + +diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c +index 8aa65ad..ba7d55e 100644 +--- a/arch/x86/kernel/apic/nmi.c ++++ b/arch/x86/kernel/apic/nmi.c +@@ -439,7 +439,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) + * wait a few IRQs (5 seconds) before doing the oops ... + */ + __this_cpu_inc(alert_counter); +- if (__this_cpu_read(alert_counter) == 5 * nmi_hz) ++ if (__this_cpu_read(alert_counter) == CONFIG_DEBUG_NMI_TIMEOUT * nmi_hz) + /* + * die_nmi will return ONLY if NOTIFY_STOP happens.. + */ +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 1fafb4b..963e78b 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -254,6 +254,14 @@ config SCHEDSTATS + application, you can say N to avoid the very slight overhead + this adds. + ++config DEBUG_NMI_TIMEOUT ++ int "Number of seconds before NMI timeout" ++ depends on X86 ++ default 5 ++ help ++ This value is the number of seconds the NMI watchdog will tick ++ before it decides the machine has hung. 
++ + config TIMER_STATS + bool "Collect kernel timers statistics" + depends on DEBUG_KERNEL && PROC_FS +-- +1.7.0.1 + diff --git a/linux-2.6-debug-sizeof-structs.patch b/linux-2.6-debug-sizeof-structs.patch new file mode 100644 index 000000000..cc7747d1f --- /dev/null +++ b/linux-2.6-debug-sizeof-structs.patch @@ -0,0 +1,31 @@ +diff --git a/init/main.c b/init/main.c +index 7449819..98cfaae 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -369,6 +369,10 @@ static void __init setup_nr_cpu_ids(void) + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; + } + ++#include ++#include ++#include ++ + /* Called by boot processor to activate the rest. */ + static void __init smp_init(void) + { +@@ -391,6 +395,15 @@ static void __init smp_init(void) + /* Any cleanup work */ + printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus()); + smp_cpus_done(setup_max_cpus); ++ ++ printk(KERN_DEBUG "sizeof(vma)=%u bytes\n", (unsigned int) sizeof(struct vm_area_struct)); ++ printk(KERN_DEBUG "sizeof(page)=%u bytes\n", (unsigned int) sizeof(struct page)); ++ printk(KERN_DEBUG "sizeof(inode)=%u bytes\n", (unsigned int) sizeof(struct inode)); ++ printk(KERN_DEBUG "sizeof(dentry)=%u bytes\n", (unsigned int) sizeof(struct dentry)); ++ printk(KERN_DEBUG "sizeof(ext3inode)=%u bytes\n", (unsigned int) sizeof(struct ext3_inode_info)); ++ printk(KERN_DEBUG "sizeof(buffer_head)=%u bytes\n", (unsigned int) sizeof(struct buffer_head)); ++ printk(KERN_DEBUG "sizeof(skbuff)=%u bytes\n", (unsigned int) sizeof(struct sk_buff)); ++ printk(KERN_DEBUG "sizeof(task_struct)=%u bytes\n", (unsigned int) sizeof(struct task_struct)); + } + + #endif diff --git a/linux-2.6-debug-taint-vm.patch b/linux-2.6-debug-taint-vm.patch new file mode 100644 index 000000000..ee367d45a --- /dev/null +++ b/linux-2.6-debug-taint-vm.patch @@ -0,0 +1,65 @@ +From b04c57d9dc889462951312be2ac81ff6c702e954 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Wed, 8 Jul 2009 13:05:09 -0400 +Subject: 
[PATCH 3/6] fedora: linux-2.6-debug-taint-vm.patch + +--- + kernel/panic.c | 4 +++- + mm/slab.c | 8 ++++---- + mm/slub.c | 2 +- + 4 files changed, 11 insertions(+), 8 deletions(-) + +diff --git a/kernel/panic.c b/kernel/panic.c +index 984b3ec..6d1c3be 100644 +--- a/kernel/panic.c ++++ b/kernel/panic.c +@@ -199,6 +199,7 @@ const char *print_tainted(void) + + return buf; + } ++EXPORT_SYMBOL(print_tainted); + + int test_taint(unsigned flag) + { +diff --git a/mm/slab.c b/mm/slab.c +index e74a16e..7bc287e 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -1803,8 +1803,8 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) + /* Print header */ + if (lines == 0) { + printk(KERN_ERR +- "Slab corruption: %s start=%p, len=%d\n", +- cachep->name, realobj, size); ++ "Slab corruption (%s): %s start=%p, len=%d\n", ++ print_tainted(), cachep->name, realobj, size); + print_objinfo(cachep, objp, 0); + } + /* Hexdump the affected line */ +@@ -2902,8 +2902,8 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) + if (entries != cachep->num - slabp->inuse) { + bad: + printk(KERN_ERR "slab: Internal list corruption detected in " +- "cache '%s'(%d), slabp %p(%d). Hexdump:\n", +- cachep->name, cachep->num, slabp, slabp->inuse); ++ "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n", ++ cachep->name, cachep->num, slabp, slabp->inuse, print_tainted()); + for (i = 0; + i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); + i++) { +diff --git a/mm/slub.c b/mm/slub.c +index 819f056..8eff0f4 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -433,7 +433,7 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...) 
+ va_end(args); + printk(KERN_ERR "========================================" + "=====================================\n"); +- printk(KERN_ERR "BUG %s: %s\n", s->name, buf); ++ printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf); + printk(KERN_ERR "----------------------------------------" + "-------------------------------------\n\n"); + } +-- +1.6.2.5 + diff --git a/linux-2.6-debug-vm-would-have-oomkilled.patch b/linux-2.6-debug-vm-would-have-oomkilled.patch new file mode 100644 index 000000000..dd8ba3f0a --- /dev/null +++ b/linux-2.6-debug-vm-would-have-oomkilled.patch @@ -0,0 +1,64 @@ +From 03657519851cd180983db4bd0c38eaeed4aa2962 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 11 Jan 2010 08:25:12 -0500 +Subject: linux-2.6-debug-vm-would-have-oomkilled.patch + +--- + kernel/sysctl.c | 8 ++++++++ + mm/oom_kill.c | 7 +++++++ + 2 files changed, 15 insertions(+), 0 deletions(-) + +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 8a68b24..72a4ff1 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -71,6 +71,7 @@ extern int sysctl_overcommit_ratio; + extern int sysctl_panic_on_oom; + extern int sysctl_oom_kill_allocating_task; + extern int sysctl_oom_dump_tasks; ++extern int sysctl_would_have_oomkilled; + extern int max_threads; + extern int core_uses_pid; + extern int suid_dumpable; +@@ -973,6 +974,13 @@ static struct ctl_table vm_table[] = { + .proc_handler = proc_dointvec, + }, + { ++ .procname = "would_have_oomkilled", ++ .data = &sysctl_would_have_oomkilled, ++ .maxlen = sizeof(sysctl_would_have_oomkilled), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec, ++ }, ++ { + .procname = "overcommit_ratio", + .data = &sysctl_overcommit_ratio, + .maxlen = sizeof(sysctl_overcommit_ratio), +diff --git a/mm/oom_kill.c b/mm/oom_kill.c +index f52481b..a892f07 100644 +--- a/mm/oom_kill.c ++++ b/mm/oom_kill.c +@@ -31,6 +31,7 @@ + int sysctl_panic_on_oom; + int sysctl_oom_kill_allocating_task; + int sysctl_oom_dump_tasks; ++int 
sysctl_would_have_oomkilled; + static DEFINE_SPINLOCK(zone_scan_lock); + /* #define DEBUG */ + +@@ -396,6 +397,12 @@ static void __oom_kill_task(struct task_struct *p, int verbose) + return; + } + ++ if (sysctl_would_have_oomkilled == 1) { ++ printk(KERN_ERR "Would have killed process %d (%s). But continuing instead.\n", ++ task_pid_nr(p), p->comm); ++ return; ++ } ++ + if (verbose) + printk(KERN_ERR "Killed process %d (%s) " + "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n", +-- +1.6.5.2 + diff --git a/linux-2.6-defaults-acpi-video.patch b/linux-2.6-defaults-acpi-video.patch new file mode 100644 index 000000000..af883b0d3 --- /dev/null +++ b/linux-2.6-defaults-acpi-video.patch @@ -0,0 +1,13 @@ +diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c +index d8d7596..a1b7117 100644 +--- a/drivers/acpi/video.c ++++ b/drivers/acpi/video.c +@@ -71,7 +71,7 @@ MODULE_AUTHOR("Bruno Ducrot"); + MODULE_DESCRIPTION("ACPI Video Driver"); + MODULE_LICENSE("GPL"); + +-static int brightness_switch_enabled = 1; ++static int brightness_switch_enabled = 0; + module_param(brightness_switch_enabled, bool, 0644); + + static int acpi_video_bus_add(struct acpi_device *device); diff --git a/linux-2.6-defaults-aspm.patch b/linux-2.6-defaults-aspm.patch new file mode 100644 index 000000000..49b832d2c --- /dev/null +++ b/linux-2.6-defaults-aspm.patch @@ -0,0 +1,12 @@ +diff -up linux-2.6.30.noarch/drivers/pci/pcie/aspm.c.mjg linux-2.6.30.noarch/drivers/pci/pcie/aspm.c +--- linux-2.6.30.noarch/drivers/pci/pcie/aspm.c.mjg 2009-07-16 22:01:11.000000000 +0100 ++++ linux-2.6.30.noarch/drivers/pci/pcie/aspm.c 2009-07-16 22:01:30.000000000 +0100 +@@ -65,7 +65,7 @@ static LIST_HEAD(link_list); + #define POLICY_DEFAULT 0 /* BIOS default setting */ + #define POLICY_PERFORMANCE 1 /* high performance */ + #define POLICY_POWERSAVE 2 /* high power saving */ +-static int aspm_policy; ++static int aspm_policy = POLICY_POWERSAVE; + static const char *policy_str[] = { + [POLICY_DEFAULT] = "default", + 
[POLICY_PERFORMANCE] = "performance", diff --git a/linux-2.6-defaults-pci_no_msi.patch b/linux-2.6-defaults-pci_no_msi.patch new file mode 100644 index 000000000..9f49321b8 --- /dev/null +++ b/linux-2.6-defaults-pci_no_msi.patch @@ -0,0 +1,110 @@ +From 14bdd0d36f5284108468bb73afd50726b07c7a84 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 29 Mar 2010 23:43:49 -0400 +Subject: linux-2.6-defaults-pci_no_msi + +--- + Documentation/kernel-parameters.txt | 3 +++ + drivers/pci/Kconfig | 12 ++++++++++++ + drivers/pci/msi.c | 9 +++++++++ + drivers/pci/pci.c | 2 ++ + drivers/pci/pci.h | 2 ++ + 5 files changed, 28 insertions(+), 0 deletions(-) + +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index e4cbca5..8154a0f 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -1911,6 +1911,9 @@ and is between 256 and 4096 characters. It is defined in the file + check_enable_amd_mmconf [X86] check for and enable + properly configured MMIO access to PCI + config space on AMD family 10h CPU ++ msi [MSI] If the PCI_MSI kernel config parameter is ++ enabled, this kernel boot option can be used to ++ enable the use of MSI interrupts system-wide. + nomsi [MSI] If the PCI_MSI kernel config parameter is + enabled, this kernel boot option can be used to + disable the use of MSI interrupts system-wide. +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig +index 7858a11..b12fcad 100644 +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -21,6 +21,18 @@ config PCI_MSI + + If you don't know what to do here, say N. + ++config PCI_MSI_DEFAULT_ON ++ def_bool y ++ prompt "Use Message Signaled Interrupts by default" ++ depends on PCI_MSI ++ help ++ Selecting this option will enable use of PCI MSI where applicable ++ by default. Support for MSI can be disabled through the use of the ++ pci=nomsi boot flag. 
Conversely, if this option is not selected, ++ support for PCI MSI can be enabled by passing the pci=msi flag. ++ ++ If you don't know what to do here, say N. ++ + config PCI_DEBUG + bool "PCI Debugging" + depends on PCI && DEBUG_KERNEL +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c +index f9cf317..6b0539a 100644 +--- a/drivers/pci/msi.c ++++ b/drivers/pci/msi.c +@@ -22,7 +22,11 @@ + #include "pci.h" + #include "msi.h" + ++#ifdef CONFIG_PCI_MSI_DEFAULT_ON + static int pci_msi_enable = 1; ++#else ++static int pci_msi_enable = 0; ++#endif /*CONFIG_PCI_MSI_DEFAULT_ON*/ + + /* Arch hooks */ + +@@ -836,6 +840,11 @@ int pci_msi_enabled(void) + } + EXPORT_SYMBOL(pci_msi_enabled); + ++void pci_yes_msi(void) ++{ ++ pci_msi_enable = 1; ++} ++ + void pci_msi_init_pci_dev(struct pci_dev *dev) + { + INIT_LIST_HEAD(&dev->msi_list); +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 1531f3a..3cb332b 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -2983,6 +2983,8 @@ static int __init pci_setup(char *str) + if (*str && (str = pcibios_setup(str)) && *str) { + if (!strcmp(str, "nomsi")) { + pci_no_msi(); ++ } else if (!strcmp(str, "msi")) { ++ pci_yes_msi(); + } else if (!strcmp(str, "noaer")) { + pci_no_aer(); + } else if (!strcmp(str, "nodomains")) { +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h +index 4eb10f4..caa051e 100644 +--- a/drivers/pci/pci.h ++++ b/drivers/pci/pci.h +@@ -122,9 +122,11 @@ extern unsigned int pci_pm_d3_delay; + + #ifdef CONFIG_PCI_MSI + void pci_no_msi(void); ++void pci_yes_msi(void); + extern void pci_msi_init_pci_dev(struct pci_dev *dev); + #else + static inline void pci_no_msi(void) { } ++static inline void pci_yes_msi(void) { } + static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } + #endif + +-- +1.7.0.1 + diff --git a/linux-2.6-driver-level-usb-autosuspend.diff b/linux-2.6-driver-level-usb-autosuspend.diff new file mode 100644 index 000000000..00384ccf5 --- /dev/null +++ 
b/linux-2.6-driver-level-usb-autosuspend.diff @@ -0,0 +1,69 @@ +commit 7d0d20a25c6f477fb198b85510c78156d7d7c5af +Author: Matthew Garrett +Date: Tue Jun 9 20:11:47 2009 +0100 + + usb: Allow drivers to enable USB autosuspend on a per-device basis + + USB autosuspend is currently only enabled by default for hubs. On other + hardware the decision is made by userspace. This is unnecessary in cases + where we know that the hardware supports autosuspend, so this patch adds + a function to allow drivers to enable it at probe time. + + Signed-off-by: Matthew Garrett + +--- + drivers/usb/core/driver.c | 16 ++++++++++++++++ + include/linux/usb.h | 4 ++++ + 2 files changed, 20 insertions(+), 0 deletions(-) + +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c +index 60a45f1..03e0228 100644 +--- a/drivers/usb/core/driver.c ++++ b/drivers/usb/core/driver.c +@@ -1648,6 +1648,21 @@ void usb_autopm_put_interface_async(struct usb_interface *intf) + EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async); + + /** ++ * usb_device_autosuspend_enable - enable autosuspend on a device ++ * @udev: the usb_device to be autosuspended ++ * ++ * This routine should be called by an interface driver when it knows that ++ * the device in question supports USB autosuspend. 
++ * ++ */ ++void usb_device_autosuspend_enable(struct usb_device *udev) ++{ ++ udev->autosuspend_disabled = 0; ++ usb_external_suspend_device(udev, PMSG_USER_SUSPEND); ++} ++EXPORT_SYMBOL_GPL(usb_device_autosuspend_enable); ++ ++/** + * usb_autopm_get_interface - increment a USB interface's PM-usage counter + * @intf: the usb_interface whose counter should be incremented + * +diff --git a/include/linux/usb.h b/include/linux/usb.h +index e101a2d..dd47590 100644 +--- a/include/linux/usb.h ++++ b/include/linux/usb.h +@@ -540,6 +540,7 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id); + + /* USB autosuspend and autoresume */ + #ifdef CONFIG_USB_SUSPEND ++extern void usb_device_autosuspend_enable(struct usb_device *udev); + extern int usb_autopm_get_interface(struct usb_interface *intf); + extern void usb_autopm_put_interface(struct usb_interface *intf); + extern int usb_autopm_get_interface_async(struct usb_interface *intf); +@@ -563,6 +564,9 @@ static inline void usb_mark_last_busy(struct usb_device *udev) + + #else + ++static inline void usb_device_autosuspend_enable(struct usb_device *udev) ++{ } ++ + static inline int usb_autopm_get_interface(struct usb_interface *intf) + { return 0; } + static inline int usb_autopm_get_interface_async(struct usb_interface *intf) +-- +1.6.5.2 + diff --git a/linux-2.6-enable-btusb-autosuspend.patch b/linux-2.6-enable-btusb-autosuspend.patch new file mode 100644 index 000000000..7e75341bb --- /dev/null +++ b/linux-2.6-enable-btusb-autosuspend.patch @@ -0,0 +1,18 @@ +commit 8e962bd41a2cbf7f0e55191a757b87f793a725a8 +Author: Matthew Garrett +Date: Tue Jun 9 20:47:51 2009 +0100 + + btusb: Enable autosuspend by default + +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 44bc8bb..4c33417 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -1020,6 +1020,7 @@ static int btusb_probe(struct usb_interface *intf, + } + + usb_set_intfdata(intf, data); ++ 
usb_device_autosuspend_enable(data->udev); + + return 0; + } diff --git a/linux-2.6-execshield.patch b/linux-2.6-execshield.patch new file mode 100644 index 000000000..f2409c290 --- /dev/null +++ b/linux-2.6-execshield.patch @@ -0,0 +1,993 @@ +From 5006dd0fae6126c149868102c100cd90a20ef2e3 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 29 Mar 2010 23:20:18 -0400 +Subject: execshield + +cebbert@redhat.com: added fix for bz#220892 + +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h +index 617bd56..526248d 100644 +--- a/arch/x86/include/asm/desc.h ++++ b/arch/x86/include/asm/desc.h +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + static inline void fill_ldt(struct desc_struct *desc, + const struct user_desc *info) +@@ -93,6 +94,9 @@ static inline int desc_empty(const void *ptr) + + #define load_TLS(t, cpu) native_load_tls(t, cpu) + #define set_ldt native_set_ldt ++#ifdef CONFIG_X86_32 ++#define load_user_cs_desc native_load_user_cs_desc ++#endif /*CONFIG_X86_32*/ + + #define write_ldt_entry(dt, entry, desc) \ + native_write_ldt_entry(dt, entry, desc) +@@ -392,4 +396,25 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) + _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); + } + ++#ifdef CONFIG_X86_32 ++static inline void set_user_cs(struct desc_struct *desc, unsigned long limit) ++{ ++ limit = (limit - 1) / PAGE_SIZE; ++ desc->a = limit & 0xffff; ++ desc->b = (limit & 0xf0000) | 0x00c0fb00; ++} ++ ++static inline void native_load_user_cs_desc(int cpu, struct mm_struct *mm) ++{ ++ get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs; ++} ++ ++#define arch_add_exec_range arch_add_exec_range ++#define arch_remove_exec_range arch_remove_exec_range ++#define arch_flush_exec_range arch_flush_exec_range ++extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit); ++extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit); ++extern 
void arch_flush_exec_range(struct mm_struct *mm); ++#endif /* CONFIG_X86_32 */ ++ + #endif /* _ASM_X86_DESC_H */ +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h +index 80a1dee..8314c66 100644 +--- a/arch/x86/include/asm/mmu.h ++++ b/arch/x86/include/asm/mmu.h +@@ -7,12 +7,19 @@ + /* + * The x86 doesn't have a mmu context, but + * we put the segment information here. ++ * ++ * exec_limit is used to track the range PROT_EXEC ++ * mappings span. + */ + typedef struct { + void *ldt; + int size; + struct mutex lock; + void *vdso; ++#ifdef CONFIG_X86_32 ++ struct desc_struct user_cs; ++ unsigned long exec_limit; ++#endif + } mm_context_t; + + #ifdef CONFIG_SMP +diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h +index 5653f43..55dadb2 100644 +--- a/arch/x86/include/asm/paravirt.h ++++ b/arch/x86/include/asm/paravirt.h +@@ -289,6 +289,12 @@ static inline void set_ldt(const void *addr, unsigned entries) + { + PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); + } ++#ifdef CONFIG_X86_32 ++static inline void load_user_cs_desc(unsigned int cpu, struct mm_struct *mm) ++{ ++ PVOP_VCALL2(pv_cpu_ops.load_user_cs_desc, cpu, mm); ++} ++#endif /*CONFIG_X86_32*/ + static inline void store_gdt(struct desc_ptr *dtr) + { + PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr); +diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h +index db9ef55..19c2793 100644 +--- a/arch/x86/include/asm/paravirt_types.h ++++ b/arch/x86/include/asm/paravirt_types.h +@@ -118,6 +118,9 @@ struct pv_cpu_ops { + void (*store_gdt)(struct desc_ptr *); + void (*store_idt)(struct desc_ptr *); + void (*set_ldt)(const void *desc, unsigned entries); ++#ifdef CONFIG_X86_32 ++ void (*load_user_cs_desc)(int cpu, struct mm_struct *mm); ++#endif + unsigned long (*store_tr)(void); + void (*load_tls)(struct thread_struct *t, unsigned int cpu); + #ifdef CONFIG_X86_64 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 
b753ea5..4893156 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -162,6 +162,9 @@ static inline int hlt_works(int cpu) + + #define cache_line_size() (boot_cpu_data.x86_cache_alignment) + ++#define __HAVE_ARCH_ALIGN_STACK ++extern unsigned long arch_align_stack(unsigned long sp); ++ + extern void cpu_detect(struct cpuinfo_x86 *c); + + extern struct pt_regs *idle_regs(struct pt_regs *); +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 4868e4a..6c8d2ca 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -802,6 +802,20 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) + /* Filter out anything that depends on CPUID levels we don't have */ + filter_cpuid_features(c, true); + ++ /* ++ * emulation of NX with segment limits unfortunately means ++ * we have to disable the fast system calls, due to the way that ++ * sysexit clears the segment limits on return. ++ * If we have either disabled exec-shield on the boot command line, ++ * or we have NX, then we don't need to do this. ++ */ ++ if (exec_shield != 0) { ++#ifdef CONFIG_X86_PAE ++ if (!test_cpu_cap(c, X86_FEATURE_NX)) ++#endif ++ clear_cpu_cap(c, X86_FEATURE_SEP); ++ } ++ + /* If the model name is still unset, do table lookup. 
*/ + if (!c->x86_model_id[0]) { + const char *p; +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c +index 1db183e..238b97d 100644 +--- a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -345,6 +345,9 @@ struct pv_cpu_ops pv_cpu_ops = { + .read_tscp = native_read_tscp, + .load_tr_desc = native_load_tr_desc, + .set_ldt = native_set_ldt, ++#ifdef CONFIG_X86_32 ++ .load_user_cs_desc = native_load_user_cs_desc, ++#endif /*CONFIG_X86_32*/ + .load_gdt = native_load_gdt, + .load_idt = native_load_idt, + .store_gdt = native_store_gdt, +diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c +index f6c6266..8ac2589 100644 +--- a/arch/x86/kernel/process_32.c ++++ b/arch/x86/kernel/process_32.c +@@ -251,7 +251,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, + void + start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) + { ++ int cpu; ++ + set_user_gs(regs, 0); ++ + regs->fs = 0; + set_fs(USER_DS); + regs->ds = __USER_DS; +@@ -260,6 +263,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) + regs->cs = __USER_CS; + regs->ip = new_ip; + regs->sp = new_sp; ++ ++ cpu = get_cpu(); ++ load_user_cs_desc(cpu, current->mm); ++ put_cpu(); ++ + /* + * Free the old FP and other extended state + */ +@@ -319,6 +327,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + if (preload_fpu) + prefetch(next->xstate); + ++ if (next_p->mm) ++ load_user_cs_desc(cpu, next_p->mm); ++ + /* + * Reload esp0. 
+ */ +@@ -412,3 +423,40 @@ unsigned long get_wchan(struct task_struct *p) + return 0; + } + ++static void modify_cs(struct mm_struct *mm, unsigned long limit) ++{ ++ mm->context.exec_limit = limit; ++ set_user_cs(&mm->context.user_cs, limit); ++ if (mm == current->mm) { ++ int cpu; ++ ++ cpu = get_cpu(); ++ load_user_cs_desc(cpu, mm); ++ put_cpu(); ++ } ++} ++ ++void arch_add_exec_range(struct mm_struct *mm, unsigned long limit) ++{ ++ if (limit > mm->context.exec_limit) ++ modify_cs(mm, limit); ++} ++ ++void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end) ++{ ++ struct vm_area_struct *vma; ++ unsigned long limit = PAGE_SIZE; ++ ++ if (old_end == mm->context.exec_limit) { ++ for (vma = mm->mmap; vma; vma = vma->vm_next) ++ if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit)) ++ limit = vma->vm_end; ++ modify_cs(mm, limit); ++ } ++} ++ ++void arch_flush_exec_range(struct mm_struct *mm) ++{ ++ mm->context.exec_limit = 0; ++ set_user_cs(&mm->context.user_cs, 0); ++} +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 1168e44..c452918 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -115,6 +115,76 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err) + if (!user_mode_vm(regs)) + die(str, regs, err); + } ++ ++static inline int ++__compare_user_cs_desc(const struct desc_struct *desc1, ++ const struct desc_struct *desc2) ++{ ++ return ((desc1->limit0 != desc2->limit0) || ++ (desc1->limit != desc2->limit) || ++ (desc1->base0 != desc2->base0) || ++ (desc1->base1 != desc2->base1) || ++ (desc1->base2 != desc2->base2)); ++} ++ ++/* ++ * lazy-check for CS validity on exec-shield binaries: ++ * ++ * the original non-exec stack patch was written by ++ * Solar Designer . Thanks! 
++ */ ++static int ++check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code) ++{ ++ struct desc_struct *desc1, *desc2; ++ struct vm_area_struct *vma; ++ unsigned long limit; ++ ++ if (current->mm == NULL) ++ return 0; ++ ++ limit = -1UL; ++ if (current->mm->context.exec_limit != -1UL) { ++ limit = PAGE_SIZE; ++ spin_lock(¤t->mm->page_table_lock); ++ for (vma = current->mm->mmap; vma; vma = vma->vm_next) ++ if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit)) ++ limit = vma->vm_end; ++ vma = get_gate_vma(current); ++ if (vma && (vma->vm_flags & VM_EXEC) && (vma->vm_end > limit)) ++ limit = vma->vm_end; ++ spin_unlock(¤t->mm->page_table_lock); ++ if (limit >= TASK_SIZE) ++ limit = -1UL; ++ current->mm->context.exec_limit = limit; ++ } ++ set_user_cs(¤t->mm->context.user_cs, limit); ++ ++ desc1 = ¤t->mm->context.user_cs; ++ desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS; ++ ++ if (__compare_user_cs_desc(desc1, desc2)) { ++ /* ++ * The CS was not in sync - reload it and retry the ++ * instruction. If the instruction still faults then ++ * we won't hit this branch next time around. 
++ */ ++ if (print_fatal_signals >= 2) { ++ printk(KERN_ERR "#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n", ++ error_code, error_code/8, regs->ip, ++ smp_processor_id()); ++ printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x, CPU_cs: %08x/%08x.\n", ++ current->mm->context.exec_limit, ++ desc1->a, desc1->b, desc2->a, desc2->b); ++ } ++ ++ load_user_cs_desc(cpu, current->mm); ++ ++ return 1; ++ } ++ ++ return 0; ++} + #endif + + static void __kprobes +@@ -273,6 +343,29 @@ do_general_protection(struct pt_regs *regs, long error_code) + if (!user_mode(regs)) + goto gp_in_kernel; + ++#ifdef CONFIG_X86_32 ++{ ++ int cpu; ++ int ok; ++ ++ cpu = get_cpu(); ++ ok = check_lazy_exec_limit(cpu, regs, error_code); ++ put_cpu(); ++ ++ if (ok) ++ return; ++ ++ if (print_fatal_signals) { ++ printk(KERN_ERR "#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n", ++ error_code, error_code/8, regs->ip, smp_processor_id()); ++ printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x.\n", ++ current->mm->context.exec_limit, ++ current->mm->context.user_cs.a, ++ current->mm->context.user_cs.b); ++ } ++} ++#endif /*CONFIG_X86_32*/ ++ + tsk->thread.error_code = error_code; + tsk->thread.trap_no = 13; + +@@ -863,19 +956,37 @@ do_device_not_available(struct pt_regs *regs, long error_code) + } + + #ifdef CONFIG_X86_32 ++/* ++ * The fixup code for errors in iret jumps to here (iret_exc). It loses ++ * the original trap number and erorr code. The bogus trap 32 and error ++ * code 0 are what the vanilla kernel delivers via: ++ * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1) ++ * ++ * NOTE: Because of the final "1" in the macro we need to enable interrupts. ++ * ++ * In case of a general protection fault in the iret instruction, we ++ * need to check for a lazy CS update for exec-shield. 
++ */ + dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) + { +- siginfo_t info; ++ int ok; ++ int cpu; ++ + local_irq_enable(); + +- info.si_signo = SIGILL; +- info.si_errno = 0; +- info.si_code = ILL_BADSTK; +- info.si_addr = NULL; +- if (notify_die(DIE_TRAP, "iret exception", +- regs, error_code, 32, SIGILL) == NOTIFY_STOP) +- return; +- do_trap(32, SIGILL, "iret exception", regs, error_code, &info); ++ cpu = get_cpu(); ++ ok = check_lazy_exec_limit(cpu, regs, error_code); ++ put_cpu(); ++ ++ if (!ok && notify_die(DIE_TRAP, "iret exception", regs, ++ error_code, 32, SIGSEGV) != NOTIFY_STOP) { ++ siginfo_t info; ++ info.si_signo = SIGSEGV; ++ info.si_errno = 0; ++ info.si_code = ILL_BADSTK; ++ info.si_addr = 0; ++ do_trap(32, SIGSEGV, "iret exception", regs, error_code, &info); ++ } + } + #endif + +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 1dab519..360f39d 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -124,13 +124,16 @@ static unsigned long mmap_legacy_base(void) + */ + void arch_pick_mmap_layout(struct mm_struct *mm) + { +- if (mmap_is_legacy()) { ++ if (!(2 & exec_shield) && mmap_is_legacy()) { + mm->mmap_base = mmap_legacy_base(); + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; ++ if (!(current->personality & READ_IMPLIES_EXEC) ++ && mmap_is_ia32()) ++ mm->get_unmapped_exec_area = arch_get_unmapped_exec_area; + mm->unmap_area = arch_unmap_area_topdown; + } + } +diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c +index a3250aa..e0d9cce 100644 +--- a/arch/x86/mm/setup_nx.c ++++ b/arch/x86/mm/setup_nx.c +@@ -1,3 +1,4 @@ ++#include + #include + #include + #include +@@ -23,6 +24,7 @@ static int __init noexec_setup(char *str) + disable_nx = 0; + } else if (!strncmp(str, "off", 3)) { + disable_nx = 1; ++ exec_shield = 0; + } + x86_configure_nx(); + return 0; +@@ 
-40,6 +42,10 @@ void __cpuinit x86_configure_nx(void) + void __init x86_report_nx(void) + { + if (!cpu_has_nx) { ++ if (exec_shield) ++ printk(KERN_INFO "Using x86 segment limits to approximate NX protection\n"); ++ else ++ + printk(KERN_NOTICE "Notice: NX (Execute Disable) protection " + "missing in CPU or disabled in BIOS!\n"); + } else { +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index 426f3a1..e0286b1 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -6,6 +6,7 @@ + #include + #include + ++#include + #include + #include + #include +@@ -131,6 +132,12 @@ void smp_invalidate_interrupt(struct pt_regs *regs) + union smp_flush_state *f; + + cpu = smp_processor_id(); ++ ++#ifdef CONFIG_X86_32 ++ if (current->active_mm) ++ load_user_cs_desc(cpu, current->active_mm); ++#endif ++ + /* + * orig_rax contains the negated interrupt vector. + * Use that to determine where the sender put the data. +diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c +index 02b442e..957bb67 100644 +--- a/arch/x86/vdso/vdso32-setup.c ++++ b/arch/x86/vdso/vdso32-setup.c +@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + if (compat) + addr = VDSO_HIGH_BASE; + else { +- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); ++ addr = get_unmapped_area_prot(NULL, 0, PAGE_SIZE, 0, 0, 1); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index b607239..e426a3f 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -334,6 +334,24 @@ static void xen_set_ldt(const void *addr, unsigned entries) + xen_mc_issue(PARAVIRT_LAZY_CPU); + } + ++#ifdef CONFIG_X86_32 ++static void xen_load_user_cs_desc(int cpu, struct mm_struct *mm) ++{ ++ void *gdt; ++ xmaddr_t mgdt; ++ u64 descriptor; ++ struct desc_struct user_cs; ++ ++ gdt = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS]; ++ mgdt = virt_to_machine(gdt); ++ ++ user_cs = 
mm->context.user_cs; ++ descriptor = (u64) user_cs.a | ((u64) user_cs.b) << 32; ++ ++ HYPERVISOR_update_descriptor(mgdt.maddr, descriptor); ++} ++#endif /*CONFIG_X86_32*/ ++ + static void xen_load_gdt(const struct desc_ptr *dtr) + { + unsigned long va = dtr->address; +@@ -960,6 +978,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { + + .load_tr_desc = paravirt_nop, + .set_ldt = xen_set_ldt, ++#ifdef CONFIG_X86_32 ++ .load_user_cs_desc = xen_load_user_cs_desc, ++#endif /*CONFIG_X86_32*/ + .load_gdt = xen_load_gdt, + .load_idt = xen_load_idt, + .load_tls = xen_load_tls, +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 535e763..d114af6 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -74,7 +74,7 @@ static struct linux_binfmt elf_format = { + .hasvdso = 1 + }; + +-#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) ++#define BAD_ADDR(x) IS_ERR_VALUE(x) + + static int set_brk(unsigned long start, unsigned long end) + { +@@ -701,6 +701,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + break; + } + ++ if (current->personality == PER_LINUX && (exec_shield & 2)) { ++ executable_stack = EXSTACK_DISABLE_X; ++ current->flags |= PF_RANDOMIZE; ++ } ++ + /* Some simple consistency checks for the interpreter */ + if (elf_interpreter) { + retval = -ELIBBAD; +@@ -717,6 +722,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + if (retval) + goto out_free_dentry; + ++#ifdef CONFIG_X86_32 ++ /* ++ * Turn off the CS limit completely if exec-shield disabled or ++ * NX active: ++ */ ++ if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || (__supported_pte_mask & _PAGE_NX)) ++ arch_add_exec_range(current->mm, -1); ++#endif ++ + /* OK, This is the point of no return */ + current->flags &= ~PF_FORKNOEXEC; + current->mm->def_flags = def_flags; +@@ -724,7 +738,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + /* Do this immediately, since STACK_TOP as used in 
setup_arg_pages + may depend on the personality. */ + SET_PERSONALITY(loc->elf_ex); +- if (elf_read_implies_exec(loc->elf_ex, executable_stack)) ++ if (!(exec_shield & 2) && ++ elf_read_implies_exec(loc->elf_ex, executable_stack)) + current->personality |= READ_IMPLIES_EXEC; + + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +@@ -890,7 +905,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) + interpreter, + &interp_map_addr, + load_bias); +- if (!IS_ERR((void *)elf_entry)) { ++ if (!BAD_ADDR(elf_entry)) { + /* + * load_elf_interp() returns relocation + * adjustment +diff --git a/include/linux/mm.h b/include/linux/mm.h +index e70f21b..44e6d63 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1259,7 +1259,13 @@ extern int install_special_mapping(struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long flags, struct page **pages); + +-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); ++extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int); ++ ++static inline unsigned long get_unmapped_area(struct file *file, unsigned long addr, ++ unsigned long len, unsigned long pgoff, unsigned long flags) ++{ ++ return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0); ++} + + extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index b8bb9a6..f478e39 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -227,6 +227,9 @@ struct mm_struct { + unsigned long (*get_unmapped_area) (struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags); ++ unsigned long (*get_unmapped_exec_area) (struct file *filp, ++ unsigned long addr, unsigned long len, ++ unsigned long pgoff, 
unsigned long flags); + void (*unmap_area) (struct mm_struct *mm, unsigned long addr); + #endif + unsigned long mmap_base; /* base of mmap area */ +diff --git a/include/linux/resource.h b/include/linux/resource.h +index f1e914e..d2aef9a 100644 +--- a/include/linux/resource.h ++++ b/include/linux/resource.h +@@ -53,8 +53,11 @@ struct rlimit { + /* + * Limit the stack by to some sane default: root can always + * increase this limit if needed.. 8MB seems reasonable. ++ * ++ * (2MB more to cover randomization effects.) + */ +-#define _STK_LIM (8*1024*1024) ++#define _STK_LIM (10*1024*1024) ++#define EXEC_STACK_BIAS (2*1024*1024) + + /* + * GPG2 wants 64kB of mlocked memory, to make sure pass phrases +diff --git a/include/linux/sched.h b/include/linux/sched.h +index dad7f66..c5a3948 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -102,6 +102,9 @@ struct fs_struct; + struct bts_context; + struct perf_event_context; + ++extern int exec_shield; ++extern int print_fatal_signals; ++ + /* + * List of flags we want to share for kernel threads, + * if only because they are not used by them anyway. 
+@@ -390,6 +393,10 @@ extern void arch_pick_mmap_layout(struct mm_struct *mm); + extern unsigned long + arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); ++ ++extern unsigned long ++arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long, ++ unsigned long, unsigned long); + extern unsigned long + arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 8686b0f..a4fad81 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -99,6 +99,26 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max; + #ifndef CONFIG_MMU + extern int sysctl_nr_trim_pages; + #endif ++ ++int exec_shield = (1<<0); ++/* exec_shield is a bitmask: ++ * 0: off; vdso at STACK_TOP, 1 page below TASK_SIZE ++ * (1<<0) 1: on [also on if !=0] ++ * (1<<1) 2: force noexecstack regardless of PT_GNU_STACK ++ * The old settings ++ * (1<<2) 4: vdso just below .text of main (unless too low) ++ * (1<<3) 8: vdso just below .text of PT_INTERP (unless too low) ++ * are ignored because the vdso is placed completely randomly ++ */ ++ ++static int __init setup_exec_shield(char *str) ++{ ++ get_option(&str, &exec_shield); ++ ++ return 1; ++} ++__setup("exec-shield=", setup_exec_shield); ++ + #ifdef CONFIG_BLOCK + extern int blk_iopoll_enabled; + #endif +@@ -400,6 +420,14 @@ static struct ctl_table kern_table[] = { + .mode = 0644, + .proc_handler = proc_dointvec, + }, ++ { ++ .procname = "exec-shield", ++ .data = &exec_shield, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec, ++ }, ++ + #ifdef CONFIG_PROC_SYSCTL + { + .procname = "tainted", +diff --git a/mm/mmap.c b/mm/mmap.c +index 75557c6..8173284 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -44,6 +45,18 @@ + #define arch_rebalance_pgtables(addr, len) (addr) + #endif + ++/* No sane 
architecture will #define these to anything else */ ++#ifndef arch_add_exec_range ++#define arch_add_exec_range(mm, limit) do { ; } while (0) ++#endif ++#ifndef arch_flush_exec_range ++#define arch_flush_exec_range(mm) do { ; } while (0) ++#endif ++#ifndef arch_remove_exec_range ++#define arch_remove_exec_range(mm, limit) do { ; } while (0) ++#endif ++ ++ + static void unmap_region(struct mm_struct *mm, + struct vm_area_struct *vma, struct vm_area_struct *prev, + unsigned long start, unsigned long end); +@@ -388,6 +401,8 @@ static inline void + __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, + struct vm_area_struct *prev, struct rb_node *rb_parent) + { ++ if (vma->vm_flags & VM_EXEC) ++ arch_add_exec_range(mm, vma->vm_end); + if (prev) { + vma->vm_next = prev->vm_next; + prev->vm_next = vma; +@@ -489,6 +504,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, + rb_erase(&vma->vm_rb, &mm->mm_rb); + if (mm->mmap_cache == vma) + mm->mmap_cache = prev; ++ if (vma->vm_flags & VM_EXEC) ++ arch_remove_exec_range(mm, vma->vm_end); + } + + /* +@@ -798,6 +815,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + } else /* cases 2, 5, 7 */ + err = vma_adjust(prev, prev->vm_start, + end, prev->vm_pgoff, NULL); ++ if (prev->vm_flags & VM_EXEC) ++ arch_add_exec_range(mm, prev->vm_end); + if (err) + return NULL; + return prev; +@@ -952,7 +971,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + /* Obtain the address to map to. we verify (or select) it and ensure + * that it represents a valid section of the address space. 
+ */ +- addr = get_unmapped_area(file, addr, len, pgoff, flags); ++ addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, ++ prot & PROT_EXEC); + if (addr & ~PAGE_MASK) + return addr; + +@@ -1504,8 +1524,8 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) + } + + unsigned long +-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, +- unsigned long pgoff, unsigned long flags) ++get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len, ++ unsigned long pgoff, unsigned long flags, int exec) + { + unsigned long (*get_area)(struct file *, unsigned long, + unsigned long, unsigned long, unsigned long); +@@ -1518,7 +1538,11 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + if (len > TASK_SIZE) + return -ENOMEM; + +- get_area = current->mm->get_unmapped_area; ++ if (exec && current->mm->get_unmapped_exec_area) ++ get_area = current->mm->get_unmapped_exec_area; ++ else ++ get_area = current->mm->get_unmapped_area; ++ + if (file && file->f_op && file->f_op->get_unmapped_area) + get_area = file->f_op->get_unmapped_area; + addr = get_area(file, addr, len, pgoff, flags); +@@ -1532,8 +1556,83 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + + return arch_rebalance_pgtables(addr, len); + } ++EXPORT_SYMBOL(get_unmapped_area_prot); ++ ++static bool should_randomize(void) ++{ ++ return (current->flags & PF_RANDOMIZE) && ++ !(current->personality & ADDR_NO_RANDOMIZE); ++} ++ ++#define SHLIB_BASE 0x00110000 ++ ++unsigned long ++arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0, ++ unsigned long len0, unsigned long pgoff, unsigned long flags) ++{ ++ unsigned long addr = addr0, len = len0; ++ struct mm_struct *mm = current->mm; ++ struct vm_area_struct *vma; ++ unsigned long tmp; ++ ++ if (len > TASK_SIZE) ++ return -ENOMEM; ++ ++ if (flags & MAP_FIXED) ++ return addr; ++ ++ if (!addr) ++ addr = !should_randomize() ? 
SHLIB_BASE : ++ randomize_range(SHLIB_BASE, 0x01000000, len); ++ ++ if (addr) { ++ addr = PAGE_ALIGN(addr); ++ vma = find_vma(mm, addr); ++ if (TASK_SIZE - len >= addr && ++ (!vma || addr + len <= vma->vm_start)) ++ return addr; ++ } ++ ++ addr = SHLIB_BASE; ++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { ++ /* At this point: (!vma || addr < vma->vm_end). */ ++ if (TASK_SIZE - len < addr) ++ return -ENOMEM; ++ ++ if (!vma || addr + len <= vma->vm_start) { ++ /* ++ * Must not let a PROT_EXEC mapping get into the ++ * brk area: ++ */ ++ if (addr + len > mm->brk) ++ goto failed; ++ ++ /* ++ * Up until the brk area we randomize addresses ++ * as much as possible: ++ */ ++ if (addr >= 0x01000000 && should_randomize()) { ++ tmp = randomize_range(0x01000000, ++ PAGE_ALIGN(max(mm->start_brk, ++ (unsigned long)0x08000000)), len); ++ vma = find_vma(mm, tmp); ++ if (TASK_SIZE - len >= tmp && ++ (!vma || tmp + len <= vma->vm_start)) ++ return tmp; ++ } ++ /* ++ * Ok, randomization didnt work out - return ++ * the result of the linear search: ++ */ ++ return addr; ++ } ++ addr = vma->vm_end; ++ } ++ ++failed: ++ return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags); ++} + +-EXPORT_SYMBOL(get_unmapped_area); + + /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ + struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +@@ -1608,6 +1707,16 @@ out: + return prev ? prev->vm_next : vma; + } + ++static int over_stack_limit(unsigned long sz) ++{ ++ struct rlimit *rlim = current->signal->rlim; ++ ++ if (sz < EXEC_STACK_BIAS) ++ return 0; ++ return (sz - EXEC_STACK_BIAS) > ++ ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur); ++} ++ + /* + * Verify that the stack growth is acceptable and + * update accounting. 
This is shared with both the +@@ -1624,7 +1733,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + return -ENOMEM; + + /* Stack limit test */ +- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) ++ if (over_stack_limit(size)) + return -ENOMEM; + + /* mlock limit tests */ +@@ -1936,10 +2045,14 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + if (new->vm_ops && new->vm_ops->open) + new->vm_ops->open(new); + +- if (new_below) ++ if (new_below) { ++ unsigned long old_end = vma->vm_end; ++ + err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + + ((addr - new->vm_start) >> PAGE_SHIFT), new); +- else ++ if (vma->vm_flags & VM_EXEC) ++ arch_remove_exec_range(mm, old_end); ++ } else + err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + + /* Success. */ +@@ -2223,6 +2336,7 @@ void exit_mmap(struct mm_struct *mm) + + free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0); + tlb_finish_mmu(tlb, 0, end); ++ arch_flush_exec_range(mm); + + /* + * Walk the list again, actually closing and freeing it, +diff --git a/mm/mprotect.c b/mm/mprotect.c +index 8bc969d..3c9b4fc 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -26,9 +26,14 @@ + #include + #include + #include ++#include + #include + #include + ++#ifndef arch_remove_exec_range ++#define arch_remove_exec_range(mm, limit) do { ; } while (0) ++#endif ++ + #ifndef pgprot_modify + static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) + { +@@ -139,7 +144,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + struct mm_struct *mm = vma->vm_mm; + unsigned long oldflags = vma->vm_flags; + long nrpages = (end - start) >> PAGE_SHIFT; +- unsigned long charged = 0; ++ unsigned long charged = 0, old_end = vma->vm_end; + pgoff_t pgoff; + int error; + int dirty_accountable = 0; +@@ -204,6 +209,9 @@ success: + dirty_accountable = 1; + } + ++ if (oldflags & VM_EXEC) ++ arch_remove_exec_range(current->mm, old_end); ++ + 
mmu_notifier_invalidate_range_start(mm, start, end); + if (is_vm_hugetlb_page(vma)) + hugetlb_change_protection(vma, start, end, vma->vm_page_prot); +diff --git a/mm/mremap.c b/mm/mremap.c +index e9c75ef..0a5379f 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -488,10 +488,10 @@ unsigned long do_mremap(unsigned long addr, + if (vma->vm_flags & VM_MAYSHARE) + map_flags |= MAP_SHARED; + +- new_addr = get_unmapped_area(vma->vm_file, 0, new_len, ++ new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len, + vma->vm_pgoff + + ((addr - vma->vm_start) >> PAGE_SHIFT), +- map_flags); ++ map_flags, vma->vm_flags & VM_EXEC); + if (new_addr & ~PAGE_MASK) { + ret = new_addr; + goto out; +-- +1.7.0.1 + diff --git a/linux-2.6-firewire-git-pending.patch b/linux-2.6-firewire-git-pending.patch new file mode 100644 index 000000000..e69de29bb diff --git a/linux-2.6-firewire-git-update.patch b/linux-2.6-firewire-git-update.patch new file mode 100644 index 000000000..e69de29bb diff --git a/linux-2.6-fix-btusb-autosuspend.patch b/linux-2.6-fix-btusb-autosuspend.patch new file mode 100644 index 000000000..2dad6fe42 --- /dev/null +++ b/linux-2.6-fix-btusb-autosuspend.patch @@ -0,0 +1,18 @@ +commit ae69717118e1f14ed8737459f8c4baca1cb9c404 +Author: Matthew Garrett +Date: Wed Dec 16 14:31:30 2009 -0500 + + Fix btusb autosuspend + +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 4c33417..ec54dd6 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -307,6 +307,7 @@ static void btusb_bulk_complete(struct urb *urb) + return; + + usb_anchor_urb(urb, &data->bulk_anchor); ++ usb_mark_last_busy(data->udev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { diff --git a/linux-2.6-hotfixes.patch b/linux-2.6-hotfixes.patch new file mode 100644 index 000000000..06b57a116 --- /dev/null +++ b/linux-2.6-hotfixes.patch @@ -0,0 +1,13 @@ +fixes: +implicit declaration of function kzalloc + +--- linux-2.6.34.noarch/drivers/usb/serial/qcserial.c~ 
2010-06-08 15:19:41.000000000 -0400 ++++ linux-2.6.34.noarch/drivers/usb/serial/qcserial.c 2010-06-08 15:19:47.000000000 -0400 +@@ -11,6 +11,7 @@ + * + */ + ++#include + #include + #include + #include diff --git a/linux-2.6-input-hid-quirk-egalax.patch b/linux-2.6-input-hid-quirk-egalax.patch new file mode 100644 index 000000000..db38685f0 --- /dev/null +++ b/linux-2.6-input-hid-quirk-egalax.patch @@ -0,0 +1,41 @@ +Date: Mon, 1 Feb 2010 12:53:47 +1300 +From: Peter Hutterer +To: Dave Airlie +Subject: [PATCH] HID: add multi-input quirk for eGalax Touchcontroller + +Signed-off-by: Peter Hutterer +Tested-by: Alfred Broda +--- + drivers/hid/hid-ids.h | 3 +++ + drivers/hid/usbhid/hid-quirks.c | 1 + + 2 files changed, 4 insertions(+), 0 deletions(-) + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index f5144b8..2e698a2 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -158,6 +158,9 @@ + + #define USB_VENDOR_ID_DRAGONRISE 0x0079 + ++#define USB_VENDOR_ID_EGALAX 0x0EEF ++#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 ++ + #define USB_VENDOR_ID_ELO 0x04E7 + #define USB_DEVICE_ID_ELO_TS2700 0x0020 + +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index e987562..dc27d74 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -32,6 +32,7 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, + { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, + { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, ++ { USB_VENDOR_ID_EGALAX, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, +-- 
+1.6.5.2 + diff --git a/linux-2.6-input-kill-stupid-messages.patch b/linux-2.6-input-kill-stupid-messages.patch new file mode 100644 index 000000000..cc1dd7470 --- /dev/null +++ b/linux-2.6-input-kill-stupid-messages.patch @@ -0,0 +1,32 @@ +From b2c6d55b2351152696aafb8c9bf3ec8968acf77c Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 29 Mar 2010 23:59:58 -0400 +Subject: linux-2.6-input-kill-stupid-messages + +--- + drivers/input/keyboard/atkbd.c | 5 +++++ + 1 files changed, 5 insertions(+), 0 deletions(-) + +diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c +index d358ef8..38db098 100644 +--- a/drivers/input/keyboard/atkbd.c ++++ b/drivers/input/keyboard/atkbd.c +@@ -425,11 +426,15 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data, + goto out; + case ATKBD_RET_ACK: + case ATKBD_RET_NAK: ++#if 0 ++ /* Quite a few key switchers and other tools trigger this ++ * and it confuses people who can do nothing about it */ + if (printk_ratelimit()) + dev_warn(&serio->dev, + "Spurious %s on %s. " + "Some program might be trying access hardware directly.\n", + data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys); ++#endif + goto out; + case ATKBD_RET_ERR: + atkbd->err_count++; +-- +1.7.0.1 + diff --git a/linux-2.6-intel-iommu-igfx.patch b/linux-2.6-intel-iommu-igfx.patch new file mode 100644 index 000000000..44fd14186 --- /dev/null +++ b/linux-2.6-intel-iommu-igfx.patch @@ -0,0 +1,78 @@ +Subject: [PATCH] [intel_iommu] Default to igfx_off +From: drago01 +To: fedora-kernel-list + +This option seems to causes way to many issues, it is +being investigated by Intel's chipset team for months now and +we still don't have any outcome. + +The results so far are "black screen when starting X", +"system hangs when using GL", "system does not resume". + +The patch adds an intel_iommu=igfx_on option, which makes it opt in, +rather than opt out. 
+ +Signed-off-by: Adel Gadllah +Reviewed-by: Adam Jackson +--- + Documentation/kernel-parameters.txt | 11 +++++------ + drivers/pci/intel-iommu.c | 9 +++++---- + 2 files changed, 10 insertions(+), 10 deletions(-) + +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index e7848a0..9914485 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -992,12 +992,11 @@ and is between 256 and 4096 characters. It is defined in the file + Enable intel iommu driver. + off + Disable intel iommu driver. +- igfx_off [Default Off] +- By default, gfx is mapped as normal device. If a gfx +- device has a dedicated DMAR unit, the DMAR unit is +- bypassed by not enabling DMAR with this option. In +- this case, gfx device will use physical address for +- DMA. ++ igfx_on [Default Off] ++ By default, the gfx's DMAR unit is bypassed by not enabling ++ DMAR with this option. So the gfx device will use physical ++ address for DMA. When this option is enabled it the gfx is ++ mapped as normal device. 
+ forcedac [x86_64] + With this option iommu will not optimize to look + for io virtual address below 32 bit forcing dual +diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c +index 4173125..8f36786 100644 +--- a/drivers/pci/intel-iommu.c ++++ b/drivers/pci/intel-iommu.c +@@ -340,7 +340,8 @@ int dmar_disabled = 0; + int dmar_disabled = 1; + #endif /*CONFIG_DMAR_DEFAULT_ON*/ + +-static int dmar_map_gfx = 1; ++/* disabled by default; causes way too many issues */ ++static int dmar_map_gfx = 0; + static int dmar_forcedac; + static int intel_iommu_strict; + +@@ -361,10 +362,10 @@ static int __init intel_iommu_setup(char *str) + } else if (!strncmp(str, "off", 3)) { + dmar_disabled = 1; + printk(KERN_INFO "Intel-IOMMU: disabled\n"); +- } else if (!strncmp(str, "igfx_off", 8)) { +- dmar_map_gfx = 0; ++ } else if (!strncmp(str, "igfx_on", 7)) { ++ dmar_map_gfx = 1; + printk(KERN_INFO +- "Intel-IOMMU: disable GFX device mapping\n"); ++ "Intel-IOMMU: enabled GFX device mapping\n"); + } else if (!strncmp(str, "forcedac", 8)) { + printk(KERN_INFO + "Intel-IOMMU: Forcing DAC for PCI devices\n"); +-- +1.6.6.1 +_______________________________________________ +kernel mailing list +kernel@lists.fedoraproject.org +https://admin.fedoraproject.org/mailman/listinfo/kernel + diff --git a/linux-2.6-mac80211-age-scan-results-on-resume.patch b/linux-2.6-mac80211-age-scan-results-on-resume.patch new file mode 100644 index 000000000..d9e9631bd --- /dev/null +++ b/linux-2.6-mac80211-age-scan-results-on-resume.patch @@ -0,0 +1,181 @@ +Backport of "cfg80211: age scan results on resume" by Dan Williams. + +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h +index 23c0ab7..0432eb6 100644 +--- a/include/net/cfg80211.h ++++ b/include/net/cfg80211.h +@@ -450,6 +450,9 @@ struct ieee80211_channel; + * wireless extensions but this is subject to reevaluation as soon as this + * code is used more widely and we have a first user without wext. 
+ * ++ * @suspend: wiphy device needs to be suspended ++ * @resume: wiphy device needs to be resumed ++ * + * @add_virtual_intf: create a new virtual interface with the given name, + * must set the struct wireless_dev's iftype. + * +@@ -499,6 +502,9 @@ struct ieee80211_channel; + * @set_channel: Set channel + */ + struct cfg80211_ops { ++ int (*suspend)(struct wiphy *wiphy); ++ int (*resume)(struct wiphy *wiphy); ++ + int (*add_virtual_intf)(struct wiphy *wiphy, char *name, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params); +diff --git a/include/net/wireless.h b/include/net/wireless.h +index 21c5d96..ae2d34d 100644 +--- a/include/net/wireless.h ++++ b/include/net/wireless.h +@@ -220,6 +220,9 @@ struct wiphy { + /* dir in debugfs: ieee80211/ */ + struct dentry *debugfsdir; + ++ /* time spent in suspend, in seconds */ ++ unsigned long suspend_duration; ++ + char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); + }; + +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 9d4e4d8..691183e 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1141,6 +1141,32 @@ static int ieee80211_set_channel(struct wiphy *wiphy, + return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + } + ++#ifdef CONFIG_PM ++static int ieee80211_suspend(struct wiphy *wiphy) ++{ ++ return 0; ++} ++ ++static int ieee80211_resume(struct wiphy *wiphy) ++{ ++ struct ieee80211_local *local = wiphy_priv(wiphy); ++ unsigned long age_jiffies; ++ struct ieee80211_bss *bss; ++ ++ age_jiffies = msecs_to_jiffies(wiphy->suspend_duration * MSEC_PER_SEC); ++ spin_lock_bh(&local->bss_lock); ++ list_for_each_entry(bss, &local->bss_list, list) { ++ bss->last_update -= age_jiffies; ++ } ++ spin_unlock_bh(&local->bss_lock); ++ ++ return 0; ++} ++#else ++#define ieee80211_suspend NULL ++#define ieee80211_resume NULL ++#endif ++ + struct cfg80211_ops mac80211_config_ops = { + .add_virtual_intf = ieee80211_add_iface, + .del_virtual_intf = ieee80211_del_iface, +@@ -1169,4 
+1195,6 @@ struct cfg80211_ops mac80211_config_ops = { + .change_bss = ieee80211_change_bss, + .set_txq_params = ieee80211_set_txq_params, + .set_channel = ieee80211_set_channel, ++ .suspend = ieee80211_suspend, ++ .resume = ieee80211_resume, + }; +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c +index f5c7c33..eb43ff5 100644 +--- a/net/mac80211/scan.c ++++ b/net/mac80211/scan.c +@@ -745,6 +745,15 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info, + } + } + ++static inline unsigned int elapsed_jiffies_msecs(unsigned long start) ++{ ++ unsigned long end = jiffies; ++ ++ if (end >= start) ++ return jiffies_to_msecs(end - start); ++ ++ return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); ++} + + static char * + ieee80211_scan_result(struct ieee80211_local *local, +@@ -857,8 +866,8 @@ ieee80211_scan_result(struct ieee80211_local *local, + &iwe, buf); + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVCUSTOM; +- sprintf(buf, " Last beacon: %dms ago", +- jiffies_to_msecs(jiffies - bss->last_update)); ++ sprintf(buf, " Last beacon: %ums ago", ++ elapsed_jiffies_msecs(bss->last_update)); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, &iwe, buf); +diff --git a/net/wireless/core.h b/net/wireless/core.h +index f7fb9f4..a4031a9 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -41,6 +41,8 @@ struct cfg80211_registered_device { + struct mutex devlist_mtx; + struct list_head netdev_list; + ++ unsigned long suspend_at; ++ + /* must be last because of the way we do wiphy_priv(), + * and it should at least be aligned to NETDEV_ALIGN */ + struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); +diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c +index 79a3828..dc92564 100644 +--- a/net/wireless/sysfs.c ++++ b/net/wireless/sysfs.c +@@ -55,6 +55,39 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) + } + #endif + ++static int wiphy_suspend(struct 
device *dev, pm_message_t state) ++{ ++ struct cfg80211_registered_device *rdev = dev_to_rdev(dev); ++ int ret = 0; ++ ++ rdev->wiphy.suspend_duration = 0; ++ rdev->suspend_at = get_seconds(); ++ ++ if (rdev->ops->suspend) { ++ rtnl_lock(); ++ ret = rdev->ops->suspend(&rdev->wiphy); ++ rtnl_unlock(); ++ } ++ ++ return ret; ++} ++ ++static int wiphy_resume(struct device *dev) ++{ ++ struct cfg80211_registered_device *rdev = dev_to_rdev(dev); ++ int ret = 0; ++ ++ rdev->wiphy.suspend_duration = get_seconds() - rdev->suspend_at; ++ ++ if (rdev->ops->resume) { ++ rtnl_lock(); ++ ret = rdev->ops->resume(&rdev->wiphy); ++ rtnl_unlock(); ++ } ++ ++ return ret; ++} ++ + struct class ieee80211_class = { + .name = "ieee80211", + .owner = THIS_MODULE, +@@ -63,6 +96,8 @@ struct class ieee80211_class = { + #ifdef CONFIG_HOTPLUG + .dev_uevent = wiphy_uevent, + #endif ++ .suspend = wiphy_suspend, ++ .resume = wiphy_resume, + }; + + int wiphy_sysfs_init(void) diff --git a/linux-2.6-makefile-after_link.patch b/linux-2.6-makefile-after_link.patch new file mode 100644 index 000000000..94b71f9b1 --- /dev/null +++ b/linux-2.6-makefile-after_link.patch @@ -0,0 +1,57 @@ +diff --git a/Makefile b/Makefile +index f908acc..960ff6f 100644 +--- a/Makefile ++++ b/Makefile +@@ -746,6 +746,10 @@ quiet_cmd_vmlinux__ ?= LD $@ + --start-group $(vmlinux-main) --end-group \ + $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^) + ++ifdef AFTER_LINK ++cmd_vmlinux__ += ; $(AFTER_LINK) ++endif ++ + # Generate new vmlinux version + quiet_cmd_vmlinux_version = GEN .version + cmd_vmlinux_version = set -e; \ +diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile +index 51ead52..ad21273 100644 +--- a/arch/powerpc/kernel/vdso32/Makefile ++++ b/arch/powerpc/kernel/vdso32/Makefile +@@ -41,7 +41,8 @@ $(obj-vdso32): %.o: %.S + + # actual build commands + quiet_cmd_vdso32ld = VDSO32L $@ +- cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ ++ 
cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ \ ++ $(if $(AFTER_LINK),; $(AFTER_LINK)) + quiet_cmd_vdso32as = VDSO32A $@ + cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< + +diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile +index 79da65d..f11c21b 100644 +--- a/arch/powerpc/kernel/vdso64/Makefile ++++ b/arch/powerpc/kernel/vdso64/Makefile +@@ -36,7 +36,8 @@ $(obj-vdso64): %.o: %.S + + # actual build commands + quiet_cmd_vdso64ld = VDSO64L $@ +- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ ++ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ \ ++ $(if $(AFTER_LINK),; $(AFTER_LINK)) + quiet_cmd_vdso64as = VDSO64A $@ + cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< + +diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile +index 6b4ffed..cbc3d05 100644 +--- a/arch/x86/vdso/Makefile ++++ b/arch/x86/vdso/Makefile +@@ -120,7 +120,8 @@ $(obj)/vdso32-syms.lds: $(vdso32.so-y:%=$(obj)/vdso32-%-syms.lds) FORCE + quiet_cmd_vdso = VDSO $@ + cmd_vdso = $(CC) -nostdlib -o $@ \ + $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ +- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) ++ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) \ ++ $(if $(AFTER_LINK),; $(AFTER_LINK)) + + VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + GCOV_PROFILE := n diff --git a/linux-2.6-phylib-autoload.patch b/linux-2.6-phylib-autoload.patch new file mode 100644 index 000000000..2b9122560 --- /dev/null +++ b/linux-2.6-phylib-autoload.patch @@ -0,0 +1,403 @@ +From c413dfa59bf979475a9647cc165f547021efeb27 Mon Sep 17 00:00:00 2001 +From: David Woodhouse +Date: Wed, 31 Mar 2010 02:10:20 +0100 +Subject: [PATCH 1/2] phylib: Support phy module autoloading + +We don't use the normal hotplug mechanism because it doesn't work. 
It will +load the module some time after the device appears, but that's not good +enough for us -- we need the driver loaded _immediately_ because otherwise +the NIC driver may just abort and then the phy 'device' goes away. + +[bwh: s/phy/mdio/ in module alias, kerneldoc for struct mdio_device_id] + +Signed-off-by: David Woodhouse +--- + drivers/net/phy/phy_device.c | 12 ++++++++++++ + include/linux/mod_devicetable.h | 26 ++++++++++++++++++++++++++ + include/linux/phy.h | 1 + + scripts/mod/file2alias.c | 26 ++++++++++++++++++++++++++ + 4 files changed, 65 insertions(+), 0 deletions(-) + +From 9ddd9886cc89827a4713e9a96614148272fdaa8e Mon Sep 17 00:00:00 2001 +From: David Woodhouse +Date: Wed, 31 Mar 2010 02:12:06 +0100 +Subject: [PATCH 2/2] phylib: Add module table to all existing phy drivers + +Signed-off-by: David Woodhouse +--- + drivers/net/phy/bcm63xx.c | 8 ++++++++ + drivers/net/phy/broadcom.c | 16 ++++++++++++++++ + drivers/net/phy/cicada.c | 8 ++++++++ + drivers/net/phy/davicom.c | 9 +++++++++ + drivers/net/phy/et1011c.c | 7 +++++++ + drivers/net/phy/icplus.c | 7 +++++++ + drivers/net/phy/lxt.c | 8 ++++++++ + drivers/net/phy/marvell.c | 13 +++++++++++++ + drivers/net/phy/national.c | 7 +++++++ + drivers/net/phy/qsemi.c | 7 +++++++ + drivers/net/phy/realtek.c | 7 +++++++ + drivers/net/phy/smsc.c | 11 +++++++++++ + drivers/net/phy/ste10Xp.c | 8 ++++++++ + drivers/net/phy/vitesse.c | 8 ++++++++ + 14 files changed, 124 insertions(+), 0 deletions(-) + +diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c +index 4fed95e..ac5e498 100644 +--- a/drivers/net/phy/bcm63xx.c ++++ b/drivers/net/phy/bcm63xx.c +@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void) + + module_init(bcm63xx_phy_init); + module_exit(bcm63xx_phy_exit); ++ ++static struct mdio_device_id bcm63xx_tbl[] = { ++ { 0x00406000, 0xfffffc00 }, ++ { 0x002bdc00, 0xfffffc00 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, bcm64xx_tbl); +diff --git a/drivers/net/phy/broadcom.c 
b/drivers/net/phy/broadcom.c +index 33c4b12..f1939c3 100644 +--- a/drivers/net/phy/broadcom.c ++++ b/drivers/net/phy/broadcom.c +@@ -911,3 +911,19 @@ static void __exit broadcom_exit(void) + + module_init(broadcom_init); + module_exit(broadcom_exit); ++ ++static struct mdio_device_id broadcom_tbl[] = { ++ { 0x00206070, 0xfffffff0 }, ++ { 0x002060e0, 0xfffffff0 }, ++ { 0x002060c0, 0xfffffff0 }, ++ { 0x002060b0, 0xfffffff0 }, ++ { 0x0143bca0, 0xfffffff0 }, ++ { 0x0143bcb0, 0xfffffff0 }, ++ { PHY_ID_BCM50610, 0xfffffff0 }, ++ { PHY_ID_BCM50610M, 0xfffffff0 }, ++ { PHY_ID_BCM57780, 0xfffffff0 }, ++ { 0x0143bc70, 0xfffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, broadcom_tbl); +diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c +index a1bd599..efc608f 100644 +--- a/drivers/net/phy/cicada.c ++++ b/drivers/net/phy/cicada.c +@@ -159,3 +159,11 @@ static void __exit cicada_exit(void) + + module_init(cicada_init); + module_exit(cicada_exit); ++ ++static struct mdio_device_id cicada_tbl[] = { ++ { 0x000fc410, 0x000ffff0 }, ++ { 0x000fc440, 0x000fffc0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, cicada_tbl); +diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c +index d926168..e02b18c 100644 +--- a/drivers/net/phy/davicom.c ++++ b/drivers/net/phy/davicom.c +@@ -219,3 +219,12 @@ static void __exit davicom_exit(void) + + module_init(davicom_init); + module_exit(davicom_exit); ++ ++static struct mdio_device_id davicom_tbl[] = { ++ { 0x0181b880, 0x0ffffff0 }, ++ { 0x0181b8a0, 0x0ffffff0 }, ++ { 0x00181b80, 0x0ffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, davicom_tbl); +diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c +index b031fa2..500f0fd 100644 +--- a/drivers/net/phy/et1011c.c ++++ b/drivers/net/phy/et1011c.c +@@ -111,3 +111,10 @@ static void __exit et1011c_exit(void) + + module_init(et1011c_init); + module_exit(et1011c_exit); ++ ++static struct mdio_device_id et1011c_tbl[] = { ++ { 0x0282f014, 0xfffffff0 }, ++ { } 
++}; ++ ++MODULE_DEVICE_TABLE(mdio, et1011c_tbl); +diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c +index af3f1f2..e661e90 100644 +--- a/drivers/net/phy/icplus.c ++++ b/drivers/net/phy/icplus.c +@@ -132,3 +132,10 @@ static void __exit ip175c_exit(void) + + module_init(ip175c_init); + module_exit(ip175c_exit); ++ ++static struct mdio_device_id icplus_tbl[] = { ++ { 0x02430d80, 0x0ffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, icplus_tbl); +diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c +index 4cf3324..1d94f1d 100644 +--- a/drivers/net/phy/lxt.c ++++ b/drivers/net/phy/lxt.c +@@ -174,3 +174,11 @@ static void __exit lxt_exit(void) + + module_init(lxt_init); + module_exit(lxt_exit); ++ ++static struct mdio_device_id lxt_tbl[] = { ++ { 0x78100000, 0xfffffff0 }, ++ { 0x001378e0, 0xfffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, lxt_tbl); +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c +index 6f69b9b..4e58b2c 100644 +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -611,3 +611,16 @@ static void __exit marvell_exit(void) + + module_init(marvell_init); + module_exit(marvell_exit); ++ ++static struct mdio_device_id marvell_tbl[] = { ++ { 0x01410c60, 0xfffffff0 }, ++ { 0x01410c90, 0xfffffff0 }, ++ { 0x01410cc0, 0xfffffff0 }, ++ { 0x01410e10, 0xfffffff0 }, ++ { 0x01410cb0, 0xfffffff0 }, ++ { 0x01410cd0, 0xfffffff0 }, ++ { 0x01410e30, 0xfffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, marvell_tbl); +diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c +index 6c636eb..729ab29 100644 +--- a/drivers/net/phy/national.c ++++ b/drivers/net/phy/national.c +@@ -153,3 +153,10 @@ MODULE_LICENSE("GPL"); + + module_init(ns_init); + module_exit(ns_exit); ++ ++static struct mdio_device_id ns_tbl[] = { ++ { DP83865_PHY_ID, 0xfffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, ns_tbl); +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index adbc0fd..16aa083 100644 
+--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups); + struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) + { + struct phy_device *dev; ++ + /* We allocate the device, and initialize the + * default values */ + dev = kzalloc(sizeof(*dev), GFP_KERNEL); +@@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) + mutex_init(&dev->lock); + INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); + ++ /* Request the appropriate module unconditionally; don't ++ bother trying to do so only if it isn't already loaded, ++ because that gets complicated. A hotplug event would have ++ done an unconditional modprobe anyway. ++ We don't do normal hotplug because it won't work for MDIO ++ -- because it relies on the device staying around for long ++ enough for the driver to get loaded. With MDIO, the NIC ++ driver will get bored and give up as soon as it finds that ++ there's no driver _already_ loaded. 
*/ ++ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); ++ + return dev; + } + EXPORT_SYMBOL(phy_device_create); +diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c +index 23062d0..3ec9610 100644 +--- a/drivers/net/phy/qsemi.c ++++ b/drivers/net/phy/qsemi.c +@@ -138,3 +138,10 @@ static void __exit qs6612_exit(void) + + module_init(qs6612_init); + module_exit(qs6612_exit); ++ ++static struct mdio_device_id qs6612_tbl[] = { ++ { 0x00181440, 0xfffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, qs6612_tbl); +diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c +index a052a67..f567c0e 100644 +--- a/drivers/net/phy/realtek.c ++++ b/drivers/net/phy/realtek.c +@@ -78,3 +78,10 @@ static void __exit realtek_exit(void) + + module_init(realtek_init); + module_exit(realtek_exit); ++ ++static struct mdio_device_id realtek_tbl[] = { ++ { 0x001cc912, 0x001fffff }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, realtek_tbl); +diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c +index 5123bb9..9fb14b4 100644 +--- a/drivers/net/phy/smsc.c ++++ b/drivers/net/phy/smsc.c +@@ -236,3 +236,14 @@ MODULE_LICENSE("GPL"); + + module_init(smsc_init); + module_exit(smsc_exit); ++ ++static struct mdio_device_id smsc_tbl[] = { ++ { 0x0007c0a0, 0xfffffff0 }, ++ { 0x0007c0b0, 0xfffffff0 }, ++ { 0x0007c0c0, 0xfffffff0 }, ++ { 0x0007c0d0, 0xfffffff0 }, ++ { 0x0007c0f0, 0xfffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, smsc_tbl); +diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c +index 6bdb0d5..7229009 100644 +--- a/drivers/net/phy/ste10Xp.c ++++ b/drivers/net/phy/ste10Xp.c +@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void) + module_init(ste10Xp_init); + module_exit(ste10Xp_exit); + ++static struct mdio_device_id ste10Xp_tbl[] = { ++ { STE101P_PHY_ID, 0xfffffff0 }, ++ { STE100P_PHY_ID, 0xffffffff }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl); ++ + MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY 
driver"); + MODULE_AUTHOR("Giuseppe Cavallaro "); + MODULE_LICENSE("GPL"); +diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c +index dd3b244..45cce50 100644 +--- a/drivers/net/phy/vitesse.c ++++ b/drivers/net/phy/vitesse.c +@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void) + + module_init(vsc82xx_init); + module_exit(vsc82xx_exit); ++ ++static struct mdio_device_id vitesse_tbl[] = { ++ { PHY_ID_VSC8244, 0x000fffc0 }, ++ { PHY_ID_VSC8221, 0x000ffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, vitesse_tbl); +diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h +index f58e9d8..55f1f9c 100644 +--- a/include/linux/mod_devicetable.h ++++ b/include/linux/mod_devicetable.h +@@ -474,4 +474,30 @@ struct platform_device_id { + __attribute__((aligned(sizeof(kernel_ulong_t)))); + }; + ++#define MDIO_MODULE_PREFIX "mdio:" ++ ++#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" ++#define MDIO_ID_ARGS(_id) \ ++ (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ ++ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \ ++ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \ ++ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \ ++ ((_id)>>15) & 1, ((_id)>>14) & 1, ((_id)>>13) & 1, ((_id)>>12) & 1, \ ++ ((_id)>>11) & 1, ((_id)>>10) & 1, ((_id)>>9) & 1, ((_id)>>8) & 1, \ ++ ((_id)>>7) & 1, ((_id)>>6) & 1, ((_id)>>5) & 1, ((_id)>>4) & 1, \ ++ ((_id)>>3) & 1, ((_id)>>2) & 1, ((_id)>>1) & 1, (_id) & 1 ++ ++/** ++ * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus ++ * @phy_id: The result of ++ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask ++ * for this PHY type ++ * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0 ++ * is used to terminate an array of struct mdio_device_id. 
++ */ ++struct mdio_device_id { ++ __u32 phy_id; ++ __u32 phy_id_mask; ++}; ++ + #endif /* LINUX_MOD_DEVICETABLE_H */ +diff --git a/include/linux/phy.h b/include/linux/phy.h +index 6a7eb40..8d5715a 100644 +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + #include + +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c +index 220213e..36a60a8 100644 +--- a/scripts/mod/file2alias.c ++++ b/scripts/mod/file2alias.c +@@ -796,6 +796,28 @@ static int do_platform_entry(const char *filename, + return 1; + } + ++static int do_mdio_entry(const char *filename, ++ struct mdio_device_id *id, char *alias) ++{ ++ int i; ++ ++ alias += sprintf(alias, MDIO_MODULE_PREFIX); ++ ++ for (i = 0; i < 32; i++) { ++ if (!((id->phy_id_mask >> (31-i)) & 1)) ++ *(alias++) = '?'; ++ else if ((id->phy_id >> (31-i)) & 1) ++ *(alias++) = '1'; ++ else ++ *(alias++) = '0'; ++ } ++ ++ /* Terminate the string */ ++ *alias = 0; ++ ++ return 1; ++} ++ + /* Ignore any prefix, eg. some architectures prepend _ */ + static inline int sym_is(const char *symbol, const char *name) + { +@@ -943,6 +965,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, + do_table(symval, sym->st_size, + sizeof(struct platform_device_id), "platform", + do_platform_entry, mod); ++ else if (sym_is(symname, "__mod_mdio_device_table")) ++ do_table(symval, sym->st_size, ++ sizeof(struct mdio_device_id), "mdio", ++ do_mdio_entry, mod); + free(zeros); + } + diff --git a/linux-2.6-selinux-mprotect-checks.patch b/linux-2.6-selinux-mprotect-checks.patch new file mode 100644 index 000000000..010a63c43 --- /dev/null +++ b/linux-2.6-selinux-mprotect-checks.patch @@ -0,0 +1,124 @@ +This needs a fixed toolchain, and a userspace rebuild to work. +For these reasons, it's had difficulty getting upstream. + +ie, Fedora has a new enough toolchain, and has been rebuilt, so we don't need +the ifdefs. 
Other distros don't/haven't, and this patch would break them +if pushed upstream. + + +Subject: [Fwd: Re: [PATCH] Disable execmem for sparc] +From: Stephen Smalley +To: Dave Jones +Date: Wed, 28 Apr 2010 16:04:56 -0400 +Message-Id: <1272485096.6013.326.camel@moss-pluto.epoch.ncsc.mil> + +-------- Forwarded Message -------- +From: Stephen Smalley +To: David Miller +Cc: tcallawa@redhat.com, dennis@ausil.us, sparclinux@vger.kernel.org, dgilmore@redhat.com, jmorris@namei.org, eparis@parisplace.org +Subject: Re: [PATCH] Disable execmem for sparc +Date: Wed, 28 Apr 2010 15:57:57 -0400 + +On Tue, 2010-04-27 at 11:47 -0700, David Miller wrote: +> From: "Tom \"spot\" Callaway" +> Date: Tue, 27 Apr 2010 14:20:21 -0400 +> +> > [root@apollo ~]$ cat /proc/2174/maps +> > 00010000-00014000 r-xp 00000000 fd:00 15466577 +> > /sbin/mingetty +> > 00022000-00024000 rwxp 00002000 fd:00 15466577 +> > /sbin/mingetty +> > 00024000-00046000 rwxp 00000000 00:00 0 +> > [heap] +> +> SELINUX probably barfs on the executable heap, the PLT is in the HEAP +> just like powerpc32 and that's why VM_DATA_DEFAULT_FLAGS has to set +> both executable and writable. +> +> You also can't remove the CONFIG_PPC32 ifdefs in selinux, since +> because of the VM_DATA_DEFAULT_FLAGS setting used still in that arch, +> the heap will always have executable permission, just like sparc does. +> You have to support those binaries forever, whether you like it or not. +> +> Let's just replace the CONFIG_PPC32 ifdef in SELINUX with CONFIG_PPC32 +> || CONFIG_SPARC as in Tom's original patch and let's be done with +> this. +> +> In fact I would go through all the arch/ header files and check the +> VM_DATA_DEFAULT_FLAGS settings and add the necessary new ifdefs to the +> SELINUX code so that other platforms don't have the pain of having to +> go through this process too. 
+ +To avoid maintaining per-arch ifdefs, it seems that we could just +directly use (VM_DATA_DEFAULT_FLAGS & VM_EXEC) as the basis for deciding +whether to enable or disable these checks. VM_DATA_DEFAULT_FLAGS isn't +constant on some architectures but instead depends on +current->personality, but we want this applied uniformly. So we'll just +use the initial task state to determine whether or not to enable these +checks. + +Signed-off-by: Stephen Smalley + +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index ebee467..a03fd74 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -2999,13 +2999,15 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd, + return file_has_perm(cred, file, av); + } + ++static int default_noexec; ++ + static int file_map_prot_check(struct file *file, unsigned long prot, int shared) + { + const struct cred *cred = current_cred(); + int rc = 0; + +-#ifndef CONFIG_PPC32 +- if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { ++ if (default_noexec && ++ (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { + /* + * We are making executable an anonymous mapping or a + * private file mapping that will also be writable. 
+@@ -3015,7 +3017,6 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared + if (rc) + goto error; + } +-#endif + + if (file) { + /* read access is always possible with a mapping */ +@@ -3076,8 +3077,8 @@ static int selinux_file_mprotect(struct vm_area_struct *vma, + if (selinux_checkreqprot) + prot = reqprot; + +-#ifndef CONFIG_PPC32 +- if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) { ++ if (default_noexec && ++ (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) { + int rc = 0; + if (vma->vm_start >= vma->vm_mm->start_brk && + vma->vm_end <= vma->vm_mm->brk) { +@@ -3099,7 +3100,6 @@ static int selinux_file_mprotect(struct vm_area_struct *vma, + if (rc) + return rc; + } +-#endif + + return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED); + } +@@ -5662,6 +5662,8 @@ static __init int selinux_init(void) + /* Set the security state for the initial task. */ + cred_init_security(); + ++ default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC); ++ + sel_inode_cache = kmem_cache_create("selinux_inode_security", + sizeof(struct inode_security_struct), + 0, SLAB_PANIC, NULL); + +-- +Stephen Smalley +National Security Agency + diff --git a/linux-2.6-serial-460800.patch b/linux-2.6-serial-460800.patch new file mode 100644 index 000000000..17d67ef64 --- /dev/null +++ b/linux-2.6-serial-460800.patch @@ -0,0 +1,70 @@ +diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c +index 2209620..659c1bb 100644 +--- a/drivers/serial/8250.c ++++ b/drivers/serial/8250.c +@@ -7,6 +7,9 @@ + * + * Copyright (C) 2001 Russell King. + * ++ * 2005/09/16: Enabled higher baud rates for 16C95x. 
++ * (Mathias Adam ) ++ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or +@@ -2227,6 +2230,14 @@ static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int + else if ((port->flags & UPF_MAGIC_MULTIPLIER) && + baud == (port->uartclk/8)) + quot = 0x8002; ++ /* ++ * For 16C950s UART_TCR is used in combination with divisor==1 ++ * to achieve baud rates up to baud_base*4. ++ */ ++ else if ((port->type == PORT_16C950) && ++ baud > (port->uartclk/16)) ++ quot = 1; ++ + else + quot = uart_get_divisor(port, baud); + +@@ -2240,7 +2251,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios, + struct uart_8250_port *up = (struct uart_8250_port *)port; + unsigned char cval, fcr = 0; + unsigned long flags; +- unsigned int baud, quot; ++ unsigned int baud, quot, max_baud; + + switch (termios->c_cflag & CSIZE) { + case CS5: +@@ -2272,9 +2283,10 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios, + /* + * Ask the core to calculate the divisor for us. + */ ++ max_baud = (up->port.type == PORT_16C950 ? port->uartclk/4 : port->uartclk/16); + baud = uart_get_baud_rate(port, termios, old, + port->uartclk / 16 / 0xffff, +- port->uartclk / 16); ++ max_baud); + quot = serial8250_get_divisor(port, baud); + + /* +@@ -2311,6 +2323,19 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios, + spin_lock_irqsave(&up->port.lock, flags); + + /* ++ * 16C950 supports additional prescaler ratios between 1:16 and 1:4 ++ * thus increasing max baud rate to uartclk/4. ++ */ ++ if (up->port.type == PORT_16C950) { ++ if (baud == port->uartclk/4) ++ serial_icr_write(up, UART_TCR, 0x4); ++ else if (baud == port->uartclk/8) ++ serial_icr_write(up, UART_TCR, 0x8); ++ else ++ serial_icr_write(up, UART_TCR, 0); ++ } ++ ++ /* + * Update the per-port timeout. 
+ */ + uart_update_timeout(port, termios->c_cflag, baud); diff --git a/linux-2.6-silence-acpi-blacklist.patch b/linux-2.6-silence-acpi-blacklist.patch new file mode 100644 index 000000000..c5997bb6e --- /dev/null +++ b/linux-2.6-silence-acpi-blacklist.patch @@ -0,0 +1,25 @@ +diff -up linux-2.6.26.noarch/drivers/acpi/blacklist.c.jx linux-2.6.26.noarch/drivers/acpi/blacklist.c +--- linux-2.6.26.noarch/drivers/acpi/blacklist.c.jx 2008-07-13 17:51:29.000000000 -0400 ++++ linux-2.6.26.noarch/drivers/acpi/blacklist.c 2008-08-12 14:21:39.000000000 -0400 +@@ -81,18 +81,18 @@ static int __init blacklist_by_year(void + + /* Doesn't exist? Likely an old system */ + if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) { +- printk(KERN_ERR PREFIX "no DMI BIOS year, " ++ printk(KERN_INFO PREFIX "no DMI BIOS year, " + "acpi=force is required to enable ACPI\n" ); + return 1; + } + /* 0? Likely a buggy new BIOS */ + if (year == 0) { +- printk(KERN_ERR PREFIX "DMI BIOS year==0, " ++ printk(KERN_INFO PREFIX "DMI BIOS year==0, " + "assuming ACPI-capable machine\n" ); + return 0; + } + if (year < CONFIG_ACPI_BLACKLIST_YEAR) { +- printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), " ++ printk(KERN_INFO PREFIX "BIOS age (%d) fails cutoff (%d), " + "acpi=force is required to enable ACPI\n", + year, CONFIG_ACPI_BLACKLIST_YEAR); + return 1; diff --git a/linux-2.6-silence-fbcon-logo.patch b/linux-2.6-silence-fbcon-logo.patch new file mode 100644 index 000000000..45ab73331 --- /dev/null +++ b/linux-2.6-silence-fbcon-logo.patch @@ -0,0 +1,42 @@ +diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c +index 1657b96..4c5c2be 100644 +--- a/drivers/video/console/fbcon.c ++++ b/drivers/video/console/fbcon.c +@@ -631,13 +631,15 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, + kfree(save); + } + +- if (logo_lines > vc->vc_bottom) { +- logo_shown = FBCON_LOGO_CANSHOW; +- printk(KERN_INFO +- "fbcon_init: disable boot-logo (boot-logo bigger than 
screen).\n"); +- } else if (logo_shown != FBCON_LOGO_DONTSHOW) { +- logo_shown = FBCON_LOGO_DRAW; +- vc->vc_top = logo_lines; ++ if (logo_shown != FBCON_LOGO_DONTSHOW) { ++ if (logo_lines > vc->vc_bottom) { ++ logo_shown = FBCON_LOGO_CANSHOW; ++ printk(KERN_INFO ++ "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n"); ++ } else { ++ logo_shown = FBCON_LOGO_DRAW; ++ vc->vc_top = logo_lines; ++ } + } + } + #endif /* MODULE */ +@@ -3489,6 +3491,14 @@ static int __init fb_console_init(void) + return 0; + } + ++static int __init quiet_logo(char *str) ++{ ++ logo_shown = FBCON_LOGO_DONTSHOW; ++ return 0; ++} ++ ++early_param("quiet", quiet_logo); ++ + module_init(fb_console_init); + + #ifdef MODULE diff --git a/linux-2.6-silence-noise.patch b/linux-2.6-silence-noise.patch new file mode 100644 index 000000000..119a97769 --- /dev/null +++ b/linux-2.6-silence-noise.patch @@ -0,0 +1,66 @@ +--- linux-2.6.26.noarch/drivers/base/power/main.c~ 2008-08-22 20:57:57.000000000 -0400 ++++ linux-2.6.26.noarch/drivers/base/power/main.c 2008-08-22 20:58:05.000000000 -0400 +@@ -69,9 +69,6 @@ void device_pm_unlock(void) + */ + void device_pm_add(struct device *dev) + { +- pr_debug("PM: Adding info for %s:%s\n", +- dev->bus ? dev->bus->name : "No Bus", +- kobject_name(&dev->kobj)); + mutex_lock(&dpm_list_mtx); + if (dev->parent) { + if (dev->parent->power.status >= DPM_SUSPENDING) +From b4e96f34c17e5a79cd28774cc722bb33e7e02c6e Mon Sep 17 00:00:00 2001 +From: Peter Jones +Date: Thu, 25 Sep 2008 16:23:33 -0400 +Subject: [PATCH] Don't print an error message just because there's no i8042 chip. + +Some systems, such as EFI-based Apple systems, won't necessarily have an +i8042 to initialize. We shouldn't be printing an error message in this +case, since not detecting the chip is the correct behavior. 
+--- + drivers/input/serio/i8042.c | 4 +--- + 1 files changed, 1 insertions(+), 3 deletions(-) + +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index 170f71e..4f3e632 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -701,10 +701,8 @@ static int __devinit i8042_check_aux(void) + + static int i8042_controller_check(void) + { +- if (i8042_flush() == I8042_BUFFER_SIZE) { +- printk(KERN_ERR "i8042.c: No controller found.\n"); ++ if (i8042_flush() == I8042_BUFFER_SIZE) + return -ENODEV; +- } + + return 0; + } +-- +1.6.0.1 + +Socket fuzzers like sfuzz will trigger this printk a lot, even though it's +ratelimited. It isn't particularly useful, so just remove it. + +Signed-off-by: Dave Jones + +--- linux-2.6.27.noarch/net/can/af_can.c~ 2008-12-11 16:53:48.000000000 -0500 ++++ linux-2.6.27.noarch/net/can/af_can.c 2008-12-11 16:54:42.000000000 -0500 +@@ -134,13 +134,9 @@ static int can_create(struct net *net, s + err = request_module("can-proto-%d", protocol); + + /* +- * In case of error we only print a message but don't +- * return the error code immediately. Below we will +- * return -EPROTONOSUPPORT ++ * In case of error we don't return the error code immediately. 
++ * Below we will return -EPROTONOSUPPORT + */ +- if (err && printk_ratelimit()) +- printk(KERN_ERR "can: request_module " +- "(can-proto-%d) failed.\n", protocol); + } + #endif + diff --git a/linux-2.6-sparc-selinux-mprotect-checks.patch b/linux-2.6-sparc-selinux-mprotect-checks.patch new file mode 100644 index 000000000..cc821e323 --- /dev/null +++ b/linux-2.6-sparc-selinux-mprotect-checks.patch @@ -0,0 +1,35 @@ +diff -up linux-2.6.24.sparc64/security/selinux/hooks.c.BAD linux-2.6.24.sparc64/security/selinux/hooks.c +--- linux-2.6.24.sparc64/security/selinux/hooks.c.BAD 2008-03-21 14:28:06.000000000 -0400 ++++ linux-2.6.24.sparc64/security/selinux/hooks.c 2008-03-21 14:29:10.000000000 -0400 +@@ -3018,6 +3018,7 @@ static int file_map_prot_check(struct fi + const struct cred *cred = current_cred(); + int rc = 0; + ++#ifndef CONFIG_SPARC + if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { + /* + * We are making executable an anonymous mapping or a +@@ -3028,6 +3029,7 @@ static int file_map_prot_check(struct fi + if (rc) + goto error; + } ++#endif + + if (file) { + /* read access is always possible with a mapping */ +@@ -3081,6 +3081,7 @@ static int selinux_file_mprotect(struct + if (selinux_checkreqprot) + prot = reqprot; + ++#ifndef CONFIG_SPARC + if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) { + rc = 0; + if (vma->vm_start >= vma->vm_mm->start_brk && +@@ -3103,6 +3103,7 @@ static int selinux_file_mprotect(struct + if (rc) + return rc; + } ++#endif + + return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED); + } diff --git a/linux-2.6-tracehook.patch b/linux-2.6-tracehook.patch new file mode 100644 index 000000000..bfed531c6 --- /dev/null +++ b/linux-2.6-tracehook.patch @@ -0,0 +1,129 @@ +From: Oleg Nesterov + +[PATCH] signals: check ->group_stop_count after tracehook_get_signal() + +Move the call to do_signal_stop() down, after tracehook call. 
+This makes ->group_stop_count condition visible to tracers before +do_signal_stop() will participate in this group-stop. + +Currently the patch has no effect, tracehook_get_signal() always +returns 0. + +Signed-off-by: Oleg Nesterov +Signed-off-by: Roland McGrath +--- + include/linux/ptrace.h | 1 + + include/linux/sched.h | 1 + + include/linux/tracehook.h | 10 +++++----- + kernel/ptrace.c | 2 +- + kernel/signal.c | 4 ++-- + 5 files changed, 10 insertions(+), 8 deletions(-) + +diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h +index e1fb607..0d84f1e 100644 +--- a/include/linux/ptrace.h ++++ b/include/linux/ptrace.h +@@ -105,6 +105,7 @@ extern int ptrace_traceme(void); + extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); + extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); + extern int ptrace_attach(struct task_struct *tsk); ++extern bool __ptrace_detach(struct task_struct *tracer, struct task_struct *tracee); + extern int ptrace_detach(struct task_struct *, unsigned int); + extern void ptrace_disable(struct task_struct *); + extern int ptrace_check_attach(struct task_struct *task, int kill); +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 2b7b81d..82e4494 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2034,6 +2034,7 @@ extern int kill_pgrp(struct pid *pid, in + extern int kill_pid(struct pid *pid, int sig, int priv); + extern int kill_proc_info(int, struct siginfo *, pid_t); + extern int do_notify_parent(struct task_struct *, int); ++extern void do_notify_parent_cldstop(struct task_struct *, int); + extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); + extern void force_sig(int, struct task_struct *); + extern int send_sig(int, struct task_struct *, int); +diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h +index 10db010..c78b2f4 100644 +--- a/include/linux/tracehook.h ++++ 
b/include/linux/tracehook.h +@@ -134,7 +134,7 @@ static inline __must_check int tracehook + */ + static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) + { +- if (step) { ++ if (step && (task_ptrace(current) & PT_PTRACED)) { + siginfo_t info; + user_single_step_siginfo(current, regs, &info); + force_sig_info(SIGTRAP, &info, current); +@@ -156,7 +156,7 @@ static inline int tracehook_unsafe_exec( + { + int unsafe = 0; + int ptrace = task_ptrace(task); +- if (ptrace & PT_PTRACED) { ++ if (ptrace) { + if (ptrace & PT_PTRACE_CAP) + unsafe |= LSM_UNSAFE_PTRACE_CAP; + else +@@ -178,7 +178,7 @@ static inline int tracehook_unsafe_exec( + */ + static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) + { +- if (task_ptrace(tsk) & PT_PTRACED) ++ if (task_ptrace(tsk)) + return rcu_dereference(tsk->parent); + return NULL; + } +@@ -386,7 +386,7 @@ static inline void tracehook_signal_hand + const struct k_sigaction *ka, + struct pt_regs *regs, int stepping) + { +- if (stepping) ++ if (stepping && (task_ptrace(current) & PT_PTRACED)) + ptrace_notify(SIGTRAP); + } + +@@ -492,7 +492,7 @@ static inline int tracehook_get_signal(s + */ + static inline int tracehook_notify_jctl(int notify, int why) + { +- return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; ++ return notify ?: task_ptrace(current) ? why : 0; + } + + /** +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 42ad8ae..067f120 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -272,7 +272,7 @@ static int ignoring_children(struct sigh + * reap it now, in that case we must also wake up sub-threads sleeping in + * do_wait(). 
+ */ +-static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) ++bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) + { + __ptrace_unlink(p); + +diff --git a/kernel/signal.c b/kernel/signal.c +index dbd7fe0..5122b80 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1515,7 +1515,7 @@ int do_notify_parent(struct task_struct + return ret; + } + +-static void do_notify_parent_cldstop(struct task_struct *tsk, int why) ++void do_notify_parent_cldstop(struct task_struct *tsk, int why) + { + struct siginfo info; + unsigned long flags; +@@ -1785,7 +1785,7 @@ static int do_signal_stop(int signr) + static int ptrace_signal(int signr, siginfo_t *info, + struct pt_regs *regs, void *cookie) + { +- if (!task_ptrace(current)) ++ if (!(task_ptrace(current) & PT_PTRACED)) + return signr; + + ptrace_signal_deliver(regs, cookie); diff --git a/linux-2.6-umh-refactor.patch b/linux-2.6-umh-refactor.patch new file mode 100644 index 000000000..1980761a9 --- /dev/null +++ b/linux-2.6-umh-refactor.patch @@ -0,0 +1,404 @@ +diff -up linux-2.6.32.noarch/fs/exec.c.orig linux-2.6.32.noarch/fs/exec.c +--- linux-2.6.32.noarch/fs/exec.c.orig 2010-02-05 06:57:45.000000000 -0500 ++++ linux-2.6.32.noarch/fs/exec.c 2010-02-05 06:57:31.000000000 -0500 +@@ -1762,6 +1762,50 @@ static void wait_for_dump_helpers(struct + } + + ++/* ++ * uhm_pipe_setup ++ * helper function to customize the process used ++ * to collect the core in userspace. Specifically ++ * it sets up a pipe and installs it as fd 0 (stdin) ++ * for the process. Returns 0 on success, or ++ * PTR_ERR on failure. ++ * Note that it also sets the core limit to 1. 
This ++ * is a special value that we use to trap recursive ++ * core dumps ++ */ ++static int umh_pipe_setup(struct subprocess_info *info) ++{ ++ struct file *rp, *wp; ++ struct fdtable *fdt; ++ struct coredump_params *cp = (struct coredump_params *)info->data; ++ struct files_struct *cf = current->files; ++ ++ wp = create_write_pipe(0); ++ if (IS_ERR(wp)) ++ return PTR_ERR(wp); ++ ++ rp = create_read_pipe(wp, 0); ++ if (IS_ERR(rp)) { ++ free_write_pipe(wp); ++ return PTR_ERR(rp); ++ } ++ ++ cp->file = wp; ++ ++ sys_close(0); ++ fd_install(0, rp); ++ spin_lock(&cf->file_lock); ++ fdt = files_fdtable(cf); ++ FD_SET(0, fdt->open_fds); ++ FD_CLR(0, fdt->close_on_exec); ++ spin_unlock(&cf->file_lock); ++ ++ /* and disallow core files too */ ++ current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; ++ ++ return 0; ++} ++ + void do_coredump(long signr, int exit_code, struct pt_regs *regs) + { + struct core_state core_state; +@@ -1842,15 +1886,15 @@ void do_coredump(long signr, int exit_co + goto fail_unlock; + + if (ispipe) { +- if (cprm.limit == 0) { ++ if (cprm.limit == 1) { + /* + * Normally core limits are irrelevant to pipes, since + * we're not writing to the file system, but we use +- * cprm.limit of 0 here as a speacial value. Any +- * non-zero limit gets set to RLIM_INFINITY below, but ++ * cprm.limit of 1 here as a speacial value. Any ++ * non-1 limit gets set to RLIM_INFINITY below, but + * a limit of 0 skips the dump. This is a consistent + * way to catch recursive crashes. We can still crash +- * if the core_pattern binary sets RLIM_CORE = !0 ++ * if the core_pattern binary sets RLIM_CORE = !1 + * but it runs as root, and can do lots of stupid things + * Note that we use task_tgid_vnr here to grab the pid + * of the process group leader. That way we get the +@@ -1858,7 +1902,7 @@ void do_coredump(long signr, int exit_co + * core_pattern process dies. 
+ */ + printk(KERN_WARNING +- "Process %d(%s) has RLIMIT_CORE set to 0\n", ++ "Process %d(%s) has RLIMIT_CORE set to 1\n", + task_tgid_vnr(current), current->comm); + printk(KERN_WARNING "Aborting core\n"); + goto fail_unlock; +@@ -1882,8 +1926,13 @@ void do_coredump(long signr, int exit_co + cprm.limit = RLIM_INFINITY; + + /* SIGPIPE can happen, but it's just never processed */ +- if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL, +- &cprm.file)) { ++ cprm.file = NULL; ++ if (call_usermodehelper_fns(helper_argv[0], helper_argv, NULL, ++ UMH_WAIT_EXEC, umh_pipe_setup, ++ NULL, &cprm)) { ++ if (cprm.file) ++ filp_close(cprm.file, NULL); ++ + printk(KERN_INFO "Core dump to %s pipe failed\n", + corename); + goto fail_dropcount; +diff -up linux-2.6.32.noarch/include/linux/kmod.h.orig linux-2.6.32.noarch/include/linux/kmod.h +--- linux-2.6.32.noarch/include/linux/kmod.h.orig 2010-02-05 06:57:45.000000000 -0500 ++++ linux-2.6.32.noarch/include/linux/kmod.h 2010-02-05 06:57:31.000000000 -0500 +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #define KMOD_PATH_LEN 256 + +@@ -44,7 +45,26 @@ static inline int request_module_nowait( + + struct key; + struct file; +-struct subprocess_info; ++ ++enum umh_wait { ++ UMH_NO_WAIT = -1, /* don't wait at all */ ++ UMH_WAIT_EXEC = 0, /* wait for the exec, but not the process */ ++ UMH_WAIT_PROC = 1, /* wait for the process to complete */ ++}; ++ ++struct subprocess_info { ++ struct work_struct work; ++ struct completion *complete; ++ struct cred *cred; ++ char *path; ++ char **argv; ++ char **envp; ++ enum umh_wait wait; ++ int retval; ++ int (*init)(struct subprocess_info *info); ++ void (*cleanup)(struct subprocess_info *info); ++ void *data; ++}; + + /* Allocate a subprocess_info structure */ + struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, +@@ -55,14 +75,10 @@ void call_usermodehelper_setkeys(struct + struct key *session_keyring); + int call_usermodehelper_stdinpipe(struct 
subprocess_info *sub_info, + struct file **filp); +-void call_usermodehelper_setcleanup(struct subprocess_info *info, +- void (*cleanup)(char **argv, char **envp)); +- +-enum umh_wait { +- UMH_NO_WAIT = -1, /* don't wait at all */ +- UMH_WAIT_EXEC = 0, /* wait for the exec, but not the process */ +- UMH_WAIT_PROC = 1, /* wait for the process to complete */ +-}; ++void call_usermodehelper_setfns(struct subprocess_info *info, ++ int (*init)(struct subprocess_info *info), ++ void (*cleanup)(struct subprocess_info *info), ++ void *data); + + /* Actually execute the sub-process */ + int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait); +@@ -72,7 +88,10 @@ int call_usermodehelper_exec(struct subp + void call_usermodehelper_freeinfo(struct subprocess_info *info); + + static inline int +-call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) ++call_usermodehelper_fns(char *path, char **argv, char **envp, ++ enum umh_wait wait, ++ int (*init)(struct subprocess_info *info), ++ void (*cleanup)(struct subprocess_info *), void *data) + { + struct subprocess_info *info; + gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? 
GFP_ATOMIC : GFP_KERNEL; +@@ -80,10 +99,18 @@ call_usermodehelper(char *path, char **a + info = call_usermodehelper_setup(path, argv, envp, gfp_mask); + if (info == NULL) + return -ENOMEM; ++ call_usermodehelper_setfns(info, init, cleanup, data); + return call_usermodehelper_exec(info, wait); + } + + static inline int ++call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) ++{ ++ return call_usermodehelper_fns(path, argv, envp, ++ wait, NULL, NULL, NULL); ++} ++ ++static inline int + call_usermodehelper_keys(char *path, char **argv, char **envp, + struct key *session_keyring, enum umh_wait wait) + { +@@ -100,10 +127,6 @@ call_usermodehelper_keys(char *path, cha + + extern void usermodehelper_init(void); + +-struct file; +-extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[], +- struct file **filp); +- + extern int usermodehelper_disable(void); + extern void usermodehelper_enable(void); + +diff -up linux-2.6.32.noarch/kernel/kmod.c.orig linux-2.6.32.noarch/kernel/kmod.c +--- linux-2.6.32.noarch/kernel/kmod.c.orig 2010-02-05 06:57:45.000000000 -0500 ++++ linux-2.6.32.noarch/kernel/kmod.c 2010-02-05 06:57:31.000000000 -0500 +@@ -124,19 +124,6 @@ int __request_module(bool wait, const ch + EXPORT_SYMBOL(__request_module); + #endif /* CONFIG_MODULES */ + +-struct subprocess_info { +- struct work_struct work; +- struct completion *complete; +- struct cred *cred; +- char *path; +- char **argv; +- char **envp; +- enum umh_wait wait; +- int retval; +- struct file *stdin; +- void (*cleanup)(char **argv, char **envp); +-}; +- + /* + * This is the task which runs the usermode application + */ +@@ -158,26 +145,15 @@ static int ____call_usermodehelper(void + commit_creds(sub_info->cred); + sub_info->cred = NULL; + +- /* Install input pipe when needed */ +- if (sub_info->stdin) { +- struct files_struct *f = current->files; +- struct fdtable *fdt; +- /* no races because files should be private here */ +- sys_close(0); +- fd_install(0, 
sub_info->stdin); +- spin_lock(&f->file_lock); +- fdt = files_fdtable(f); +- FD_SET(0, fdt->open_fds); +- FD_CLR(0, fdt->close_on_exec); +- spin_unlock(&f->file_lock); +- +- /* and disallow core files too */ +- current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0}; +- } +- + /* We can run anywhere, unlike our parent keventd(). */ + set_cpus_allowed_ptr(current, cpu_all_mask); + ++ if (sub_info->init) { ++ retval = sub_info->init(sub_info); ++ if (retval) ++ goto fail; ++ } ++ + /* + * Our parent is keventd, which runs with elevated scheduling priority. + * Avoid propagating that into the userspace child. +@@ -187,6 +163,7 @@ static int ____call_usermodehelper(void + retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp); + + /* Exec failed? */ ++fail: + sub_info->retval = retval; + do_exit(0); + } +@@ -194,7 +171,7 @@ static int ____call_usermodehelper(void + void call_usermodehelper_freeinfo(struct subprocess_info *info) + { + if (info->cleanup) +- (*info->cleanup)(info->argv, info->envp); ++ (*info->cleanup)(info); + if (info->cred) + put_cred(info->cred); + kfree(info); +@@ -406,50 +383,31 @@ void call_usermodehelper_setkeys(struct + EXPORT_SYMBOL(call_usermodehelper_setkeys); + + /** +- * call_usermodehelper_setcleanup - set a cleanup function ++ * call_usermodehelper_setfns - set a cleanup/init function + * @info: a subprocess_info returned by call_usermodehelper_setup + * @cleanup: a cleanup function ++ * @init: an init function ++ * @data: arbitrary context sensitive data + * +- * The cleanup function is just befor ethe subprocess_info is about to ++ * The init function is used to customize the helper process prior to ++ * exec. A non-zero return code causes the process to error out, exit, ++ * and return the failure to the calling process ++ * ++ * The cleanup function is just before ethe subprocess_info is about to + * be freed. This can be used for freeing the argv and envp. 
The + * Function must be runnable in either a process context or the + * context in which call_usermodehelper_exec is called. + */ +-void call_usermodehelper_setcleanup(struct subprocess_info *info, +- void (*cleanup)(char **argv, char **envp)) ++void call_usermodehelper_setfns(struct subprocess_info *info, ++ int (*init)(struct subprocess_info *info), ++ void (*cleanup)(struct subprocess_info *info), ++ void *data) + { + info->cleanup = cleanup; ++ info->init = init; ++ info->data = data; + } +-EXPORT_SYMBOL(call_usermodehelper_setcleanup); +- +-/** +- * call_usermodehelper_stdinpipe - set up a pipe to be used for stdin +- * @sub_info: a subprocess_info returned by call_usermodehelper_setup +- * @filp: set to the write-end of a pipe +- * +- * This constructs a pipe, and sets the read end to be the stdin of the +- * subprocess, and returns the write-end in *@filp. +- */ +-int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info, +- struct file **filp) +-{ +- struct file *f; +- +- f = create_write_pipe(0); +- if (IS_ERR(f)) +- return PTR_ERR(f); +- *filp = f; +- +- f = create_read_pipe(f, 0); +- if (IS_ERR(f)) { +- free_write_pipe(*filp); +- return PTR_ERR(f); +- } +- sub_info->stdin = f; +- +- return 0; +-} +-EXPORT_SYMBOL(call_usermodehelper_stdinpipe); ++EXPORT_SYMBOL(call_usermodehelper_setfns); + + /** + * call_usermodehelper_exec - start a usermode application +@@ -498,41 +456,6 @@ unlock: + } + EXPORT_SYMBOL(call_usermodehelper_exec); + +-/** +- * call_usermodehelper_pipe - call a usermode helper process with a pipe stdin +- * @path: path to usermode executable +- * @argv: arg vector for process +- * @envp: environment for process +- * @filp: set to the write-end of a pipe +- * +- * This is a simple wrapper which executes a usermode-helper function +- * with a pipe as stdin. It is implemented entirely in terms of +- * lower-level call_usermodehelper_* functions. 
+- */ +-int call_usermodehelper_pipe(char *path, char **argv, char **envp, +- struct file **filp) +-{ +- struct subprocess_info *sub_info; +- int ret; +- +- sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL); +- if (sub_info == NULL) +- return -ENOMEM; +- +- ret = call_usermodehelper_stdinpipe(sub_info, filp); +- if (ret < 0) { +- call_usermodehelper_freeinfo(sub_info); +- return ret; +- } +- +- ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); +- if (ret < 0) /* Failed to execute helper, close pipe */ +- filp_close(*filp, NULL); +- +- return ret; +-} +-EXPORT_SYMBOL(call_usermodehelper_pipe); +- + void __init usermodehelper_init(void) + { + khelper_wq = create_singlethread_workqueue("khelper"); +diff -up linux-2.6.32.noarch/kernel/sys.c.orig linux-2.6.32.noarch/kernel/sys.c +--- linux-2.6.32.noarch/kernel/sys.c.orig 2010-02-05 06:57:45.000000000 -0500 ++++ linux-2.6.32.noarch/kernel/sys.c 2010-02-05 06:48:30.000000000 -0500 +@@ -1599,9 +1599,9 @@ SYSCALL_DEFINE3(getcpu, unsigned __user + + char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; + +-static void argv_cleanup(char **argv, char **envp) ++static void argv_cleanup(struct subprocess_info *info) + { +- argv_free(argv); ++ argv_free(info->argv); + } + + /** +@@ -1635,7 +1635,7 @@ int orderly_poweroff(bool force) + goto out; + } + +- call_usermodehelper_setcleanup(info, argv_cleanup); ++ call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL); + + ret = call_usermodehelper_exec(info, UMH_NO_WAIT); + diff --git a/linux-2.6-upstream-reverts.patch b/linux-2.6-upstream-reverts.patch new file mode 100644 index 000000000..4c903f783 --- /dev/null +++ b/linux-2.6-upstream-reverts.patch @@ -0,0 +1,953 @@ +From c05556421742eb47f80301767653a4bcb19de9de Mon Sep 17 00:00:00 2001 +From: Ian Munsie +Date: Tue, 13 Apr 2010 18:37:33 +1000 +Subject: perf: Fix endianness argument compatibility with OPT_BOOLEAN() and introduce OPT_INCR() + +From: Ian Munsie + +commit 
c05556421742eb47f80301767653a4bcb19de9de upstream. + +Parsing an option from the command line with OPT_BOOLEAN on a +bool data type would not work on a big-endian machine due to the +manner in which the boolean was being cast into an int and +incremented. For example, running 'perf probe --list' on a +PowerPC machine would fail to properly set the list_events bool +and would therefore print out the usage information and +terminate. + +This patch makes OPT_BOOLEAN work as expected with a bool +datatype. For cases where the original OPT_BOOLEAN was +intentionally being used to increment an int each time it was +passed in on the command line, this patch introduces OPT_INCR +with the old behaviour of OPT_BOOLEAN (the verbose variable is +currently the only such example of this). + +I have reviewed every use of OPT_BOOLEAN to verify that a true +C99 bool was passed. Where integers were used, I verified that +they were only being used for boolean logic and changed them to +bools to ensure that they would not be mistakenly used as ints. +The major exception was the verbose variable which now uses +OPT_INCR instead of OPT_BOOLEAN. + +Signed-off-by: Ian Munsie +Acked-by: David S. 
Miller +Cc: +Cc: Git development list +Cc: Ian Munsie +Cc: Peter Zijlstra +Cc: Paul Mackerras +Cc: Arnaldo Carvalho de Melo +Cc: KOSAKI Motohiro +Cc: Hitoshi Mitake +Cc: Rusty Russell +Cc: Frederic Weisbecker +Cc: Eric B Munson +Cc: Valdis.Kletnieks@vt.edu +Cc: WANG Cong +Cc: Thiago Farina +Cc: Masami Hiramatsu +Cc: Xiao Guangrong +Cc: Jaswinder Singh Rajput +Cc: Arjan van de Ven +Cc: OGAWA Hirofumi +Cc: Mike Galbraith +Cc: Tom Zanussi +Cc: Anton Blanchard +Cc: John Kacur +Cc: Li Zefan +Cc: Steven Rostedt +LKML-Reference: <1271147857-11604-1-git-send-email-imunsie@au.ibm.com> +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + tools/perf/bench/mem-memcpy.c | 2 +- + tools/perf/bench/sched-messaging.c | 4 ++-- + tools/perf/builtin-annotate.c | 8 ++++---- + tools/perf/builtin-buildid-cache.c | 2 +- + tools/perf/builtin-buildid-list.c | 4 ++-- + tools/perf/builtin-diff.c | 4 ++-- + tools/perf/builtin-help.c | 2 +- + tools/perf/builtin-lock.c | 2 +- + tools/perf/builtin-probe.c | 2 +- + tools/perf/builtin-record.c | 24 ++++++++++++------------ + tools/perf/builtin-report.c | 6 +++--- + tools/perf/builtin-sched.c | 6 +++--- + tools/perf/builtin-stat.c | 10 +++++----- + tools/perf/builtin-timechart.c | 2 +- + tools/perf/builtin-top.c | 14 +++++++------- + tools/perf/builtin-trace.c | 2 +- + tools/perf/util/debug.c | 2 +- + tools/perf/util/debug.h | 3 ++- + tools/perf/util/parse-options.c | 6 ++++++ + tools/perf/util/parse-options.h | 4 +++- + tools/perf/util/trace-event-parse.c | 2 +- + tools/perf/util/trace-event.h | 3 ++- + 22 files changed, 62 insertions(+), 52 deletions(-) + +--- a/tools/perf/bench/mem-memcpy.c ++++ b/tools/perf/bench/mem-memcpy.c +@@ -24,7 +24,7 @@ + + static const char *length_str = "1MB"; + static const char *routine = "default"; +-static int use_clock = 0; ++static bool use_clock = false; + static int clock_fd; + + static const struct option options[] = { +--- a/tools/perf/bench/sched-messaging.c ++++ 
b/tools/perf/bench/sched-messaging.c +@@ -31,9 +31,9 @@ + + #define DATASIZE 100 + +-static int use_pipes = 0; ++static bool use_pipes = false; + static unsigned int loops = 100; +-static unsigned int thread_mode = 0; ++static bool thread_mode = false; + static unsigned int num_groups = 10; + + struct sender_context { +--- a/tools/perf/builtin-annotate.c ++++ b/tools/perf/builtin-annotate.c +@@ -29,11 +29,11 @@ + + static char const *input_name = "perf.data"; + +-static int force; ++static bool force; + +-static int full_paths; ++static bool full_paths; + +-static int print_line; ++static bool print_line; + + struct sym_hist { + u64 sum; +@@ -584,7 +584,7 @@ static const struct option options[] = { + OPT_STRING('s', "symbol", &sym_hist_filter, "symbol", + "symbol to annotate"), + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), +--- a/tools/perf/builtin-buildid-cache.c ++++ b/tools/perf/builtin-buildid-cache.c +@@ -27,7 +27,7 @@ static const struct option buildid_cache + "file list", "file(s) to add"), + OPT_STRING('r', "remove", &remove_name_list_str, "file list", + "file(s) to remove"), +- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"), ++ OPT_INCR('v', "verbose", &verbose, "be more verbose"), + OPT_END() + }; + +--- a/tools/perf/builtin-buildid-list.c ++++ b/tools/perf/builtin-buildid-list.c +@@ -16,7 +16,7 @@ + #include "util/symbol.h" + + static char const *input_name = "perf.data"; +-static int force; ++static bool force; + static bool with_hits; + + static const char * const buildid_list_usage[] = { +@@ -29,7 +29,7 @@ static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ 
OPT_INCR('v', "verbose", &verbose, + "be more verbose"), + OPT_END() + }; +--- a/tools/perf/builtin-diff.c ++++ b/tools/perf/builtin-diff.c +@@ -19,7 +19,7 @@ + static char const *input_old = "perf.data.old", + *input_new = "perf.data"; + static char diff__default_sort_order[] = "dso,symbol"; +-static int force; ++static bool force; + static bool show_displacement; + + static int perf_session__add_hist_entry(struct perf_session *self, +@@ -188,7 +188,7 @@ static const char * const diff_usage[] = + }; + + static const struct option options[] = { +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('m', "displacement", &show_displacement, + "Show position displacement relative to baseline"), +--- a/tools/perf/builtin-help.c ++++ b/tools/perf/builtin-help.c +@@ -29,7 +29,7 @@ enum help_format { + HELP_FORMAT_WEB, + }; + +-static int show_all = 0; ++static bool show_all = false; + static enum help_format help_format = HELP_FORMAT_MAN; + static struct option builtin_help_options[] = { + OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), +--- a/tools/perf/builtin-lock.c ++++ b/tools/perf/builtin-lock.c +@@ -744,7 +744,7 @@ static const char * const lock_usage[] = + + static const struct option lock_options[] = { + OPT_STRING('i', "input", &input_name, "file", "input file name"), +- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), ++ OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), + OPT_END() + }; +--- a/tools/perf/builtin-probe.c ++++ b/tools/perf/builtin-probe.c +@@ -162,7 +162,7 @@ static const char * const probe_usage[] + }; + + static const struct option options[] = { +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show parsed arguments, etc)"), + #ifndef 
NO_DWARF_SUPPORT + OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, +--- a/tools/perf/builtin-record.c ++++ b/tools/perf/builtin-record.c +@@ -39,19 +39,19 @@ static int output; + static const char *output_name = "perf.data"; + static int group = 0; + static unsigned int realtime_prio = 0; +-static int raw_samples = 0; +-static int system_wide = 0; ++static bool raw_samples = false; ++static bool system_wide = false; + static int profile_cpu = -1; + static pid_t target_pid = -1; + static pid_t child_pid = -1; +-static int inherit = 1; +-static int force = 0; +-static int append_file = 0; +-static int call_graph = 0; +-static int inherit_stat = 0; +-static int no_samples = 0; +-static int sample_address = 0; +-static int multiplex = 0; ++static bool inherit = true; ++static bool force = false; ++static bool append_file = false; ++static bool call_graph = false; ++static bool inherit_stat = false; ++static bool no_samples = false; ++static bool sample_address = false; ++static bool multiplex = false; + static int multiplex_fd = -1; + + static long samples = 0; +@@ -451,7 +451,7 @@ static int __cmd_record(int argc, const + rename(output_name, oldname); + } + } else { +- append_file = 0; ++ append_file = false; + } + + flags = O_CREAT|O_RDWR; +@@ -676,7 +676,7 @@ static const struct option options[] = { + "number of mmap data pages"), + OPT_BOOLEAN('g', "call-graph", &call_graph, + "do call-graph (stack chain/backtrace) recording"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show counter open errors, etc)"), + OPT_BOOLEAN('s', "stat", &inherit_stat, + "per thread counts"), +--- a/tools/perf/builtin-report.c ++++ b/tools/perf/builtin-report.c +@@ -33,11 +33,11 @@ + + static char const *input_name = "perf.data"; + +-static int force; ++static bool force; + static bool hide_unresolved; + static bool dont_use_callchains; + +-static int show_threads; ++static bool show_threads; + static struct perf_read_values 
show_threads_values; + + static char default_pretty_printing_style[] = "normal"; +@@ -400,7 +400,7 @@ static const char * const report_usage[] + static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), +--- a/tools/perf/builtin-sched.c ++++ b/tools/perf/builtin-sched.c +@@ -1790,7 +1790,7 @@ static const char * const sched_usage[] + static const struct option sched_options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), +@@ -1805,7 +1805,7 @@ static const char * const latency_usage[ + static const struct option latency_options[] = { + OPT_STRING('s', "sort", &sort_order, "key[,key2...]", + "sort by key(s): runtime, switch, avg, max"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_INTEGER('C', "CPU", &profile_cpu, + "CPU to profile on"), +@@ -1822,7 +1822,7 @@ static const char * const replay_usage[] + static const struct option replay_options[] = { + OPT_INTEGER('r', "repeat", &replay_repeat, + "repeat the workload replay N times (-1: infinite)"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), +--- a/tools/perf/builtin-stat.c ++++ b/tools/perf/builtin-stat.c +@@ -66,16 +66,16 @@ static struct perf_event_attr default_at + + }; + +-static int system_wide = 0; ++static bool system_wide = false; + static unsigned int nr_cpus = 0; + 
static int run_idx = 0; + + static int run_count = 1; +-static int inherit = 1; +-static int scale = 1; ++static bool inherit = true; ++static bool scale = true; + static pid_t target_pid = -1; + static pid_t child_pid = -1; +-static int null_run = 0; ++static bool null_run = false; + + static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +@@ -494,7 +494,7 @@ static const struct option options[] = { + "system-wide collection from all CPUs"), + OPT_BOOLEAN('c', "scale", &scale, + "scale/normalize counters"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show counter open errors, etc)"), + OPT_INTEGER('r', "repeat", &run_count, + "repeat command and print average + stddev (max: 100)"), +--- a/tools/perf/builtin-timechart.c ++++ b/tools/perf/builtin-timechart.c +@@ -43,7 +43,7 @@ static u64 turbo_frequency; + + static u64 first_time, last_time; + +-static int power_only; ++static bool power_only; + + + struct per_pid; +--- a/tools/perf/builtin-top.c ++++ b/tools/perf/builtin-top.c +@@ -57,7 +57,7 @@ + + static int fd[MAX_NR_CPUS][MAX_COUNTERS]; + +-static int system_wide = 0; ++static bool system_wide = false; + + static int default_interval = 0; + +@@ -65,18 +65,18 @@ static int count_filter = 5; + static int print_entries; + + static int target_pid = -1; +-static int inherit = 0; ++static bool inherit = false; + static int profile_cpu = -1; + static int nr_cpus = 0; + static unsigned int realtime_prio = 0; +-static int group = 0; ++static bool group = false; + static unsigned int page_size; + static unsigned int mmap_pages = 16; + static int freq = 1000; /* 1 KHz */ + + static int delay_secs = 2; +-static int zero = 0; +-static int dump_symtab = 0; ++static bool zero = false; ++static bool dump_symtab = false; + + static bool hide_kernel_symbols = false; + static bool hide_user_symbols = false; +@@ -839,7 +839,7 @@ static void handle_keypress(int c) + display_weighted = ~display_weighted; + break; + case 'z': +- zero = 
~zero; ++ zero = !zero; + break; + default: + break; +@@ -1296,7 +1296,7 @@ static const struct option options[] = { + "display this many functions"), + OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols, + "hide user symbols"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show counter open errors, etc)"), + OPT_END() + }; +--- a/tools/perf/builtin-trace.c ++++ b/tools/perf/builtin-trace.c +@@ -505,7 +505,7 @@ static const char * const trace_usage[] + static const struct option options[] = { + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), +- OPT_BOOLEAN('v', "verbose", &verbose, ++ OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('L', "Latency", &latency_format, + "show latency attributes (irqs/preemption disabled, etc)"), +--- a/tools/perf/util/debug.c ++++ b/tools/perf/util/debug.c +@@ -12,7 +12,7 @@ + #include "util.h" + + int verbose = 0; +-int dump_trace = 0; ++bool dump_trace = false; + + int eprintf(int level, const char *fmt, ...) + { +--- a/tools/perf/util/debug.h ++++ b/tools/perf/util/debug.h +@@ -2,10 +2,11 @@ + #ifndef __PERF_DEBUG_H + #define __PERF_DEBUG_H + ++#include + #include "event.h" + + extern int verbose; +-extern int dump_trace; ++extern bool dump_trace; + + int eprintf(int level, + const char *fmt, ...) __attribute__((format(printf, 2, 3))); +--- a/tools/perf/util/parse-options.c ++++ b/tools/perf/util/parse-options.c +@@ -49,6 +49,7 @@ static int get_value(struct parse_opt_ct + break; + /* FALLTHROUGH */ + case OPTION_BOOLEAN: ++ case OPTION_INCR: + case OPTION_BIT: + case OPTION_SET_INT: + case OPTION_SET_PTR: +@@ -73,6 +74,10 @@ static int get_value(struct parse_opt_ct + return 0; + + case OPTION_BOOLEAN: ++ *(bool *)opt->value = unset ? false : true; ++ return 0; ++ ++ case OPTION_INCR: + *(int *)opt->value = unset ? 
0 : *(int *)opt->value + 1; + return 0; + +@@ -478,6 +483,7 @@ int usage_with_options_internal(const ch + case OPTION_GROUP: + case OPTION_BIT: + case OPTION_BOOLEAN: ++ case OPTION_INCR: + case OPTION_SET_INT: + case OPTION_SET_PTR: + case OPTION_LONG: +--- a/tools/perf/util/parse-options.h ++++ b/tools/perf/util/parse-options.h +@@ -8,7 +8,8 @@ enum parse_opt_type { + OPTION_GROUP, + /* options with no arguments */ + OPTION_BIT, +- OPTION_BOOLEAN, /* _INCR would have been a better name */ ++ OPTION_BOOLEAN, ++ OPTION_INCR, + OPTION_SET_INT, + OPTION_SET_PTR, + /* options with arguments (usually) */ +@@ -95,6 +96,7 @@ struct option { + #define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) } + #define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) } + #define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } ++#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } + #define OPT_SET_INT(s, l, v, h, i) { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) } + #define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) } + #define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } +--- a/tools/perf/util/trace-event-parse.c ++++ b/tools/perf/util/trace-event-parse.c +@@ -40,7 +40,7 @@ int header_page_size_size; + int header_page_data_offset; + int header_page_data_size; + +-int latency_format; ++bool latency_format; + + static char *input_buf; + static unsigned long long input_buf_ptr; +--- a/tools/perf/util/trace-event.h ++++ b/tools/perf/util/trace-event.h +@@ -1,6 +1,7 @@ + #ifndef __PERF_TRACE_EVENTS_H + #define __PERF_TRACE_EVENTS_H + ++#include + #include "parse-events.h" 
+ + #define __unused __attribute__((unused)) +@@ -241,7 +242,7 @@ extern int header_page_size_size; + extern int header_page_data_offset; + extern int header_page_data_size; + +-extern int latency_format; ++extern bool latency_format; + + int parse_header_page(char *buf, unsigned long size); + int trace_parse_common_type(void *data); +From 6e0032f0ae4440e75256bee11b163552cae21962 Mon Sep 17 00:00:00 2001 +From: Karsten Wiese +Date: Sat, 27 Mar 2010 22:48:33 +0100 +Subject: drm/i915: Don't touch PORT_HOTPLUG_EN in intel_dp_detect() + +From: Karsten Wiese + +commit 6e0032f0ae4440e75256bee11b163552cae21962 upstream. + +PORT_HOTPLUG_EN has allready been setup in i915_driver_irq_postinstall(), +when intel_dp_detect() runs. + +Delete the DP[BCD]_HOTPLUG_INT_EN defines, they are not referenced anymore. + +I found this while searching for a fix for + https://bugzilla.redhat.com/show_bug.cgi?id=528312 + +Signed-off-by: Karsten Wiese +Signed-off-by: Eric Anholt +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/i915/intel_dp.c | 10 ---------- + 1 file changed, 10 deletions(-) + +--- a/drivers/gpu/drm/i915/intel_dp.c ++++ b/drivers/gpu/drm/i915/intel_dp.c +@@ -1180,16 +1180,6 @@ intel_dp_detect(struct drm_connector *co + if (HAS_PCH_SPLIT(dev)) + return ironlake_dp_detect(connector); + +- temp = I915_READ(PORT_HOTPLUG_EN); +- +- I915_WRITE(PORT_HOTPLUG_EN, +- temp | +- DPB_HOTPLUG_INT_EN | +- DPC_HOTPLUG_INT_EN | +- DPD_HOTPLUG_INT_EN); +- +- POSTING_READ(PORT_HOTPLUG_EN); +- + switch (dp_priv->output_reg) { + case DP_B: + bit = DPB_HOTPLUG_INT_STATUS; +From 9908ff736adf261e749b4887486a32ffa209304c Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Sat, 15 May 2010 09:57:03 +0100 +Subject: drm/i915: Kill dangerous pending-flip debugging + +From: Chris Wilson + +commit 9908ff736adf261e749b4887486a32ffa209304c upstream. + +We can, by virtue of a vblank interrupt firing in the middle of setting +up the unpin work (i.e. 
after we set the unpin_work field and before we +write to the ringbuffer) enter intel_finish_page_flip() prior to +receiving the pending flip notification. Therefore we can expect to hit +intel_finish_page_flip() under normal circumstances without a pending flip +and even without installing the pending_flip_obj. This is exacerbated by +aperture thrashing whilst binding the framebuffer + +References: + + Bug 28079 - "glresize" causes kernel panic in intel_finish_page_flip. + https://bugs.freedesktop.org/show_bug.cgi?id=28079 + +Reported-by: Nick Bowler +Signed-off-by: Chris Wilson +Cc: Jesse Barnes +Reviewed-by: Jesse Barnes +Signed-off-by: Eric Anholt +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/i915/intel_display.c | 6 ------ + 1 file changed, 6 deletions(-) + +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -4155,12 +4155,6 @@ void intel_finish_page_flip(struct drm_d + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + if (work == NULL || !work->pending) { +- if (work && !work->pending) { +- obj_priv = to_intel_bo(work->pending_flip_obj); +- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", +- obj_priv, +- atomic_read(&obj_priv->pending_flip)); +- } + spin_unlock_irqrestore(&dev->event_lock, flags); + return; + } +From ac0c6b5ad3b3b513e1057806d4b7627fcc0ecc27 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Thu, 27 May 2010 13:18:18 +0100 +Subject: drm/i915: Rebind bo if currently bound with incorrect alignment. + +From: Chris Wilson + +commit ac0c6b5ad3b3b513e1057806d4b7627fcc0ecc27 upstream. + +Whilst pinning the buffer, check that that its current alignment +matches the requested alignment. If it does not, rebind. 
+ +This should clear up any final render errors whilst resuming, +for reference: + + Bug 27070 - [i915] Page table errors with empty ringbuffer + https://bugs.freedesktop.org/show_bug.cgi?id=27070 + + Bug 15502 - render error detected, EIR: 0x00000010 + https://bugzilla.kernel.org/show_bug.cgi?id=15502 + + Bug 13844 - i915 error: "render error detected" + https://bugzilla.kernel.org/show_bug.cgi?id=13844 + +Signed-off-by: Chris Wilson +Signed-off-by: Eric Anholt +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/i915/i915_gem.c | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -4239,6 +4239,17 @@ i915_gem_object_pin(struct drm_gem_objec + int ret; + + i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ if (obj_priv->gtt_space != NULL) { ++ if (alignment == 0) ++ alignment = i915_gem_get_gtt_alignment(obj); ++ if (obj_priv->gtt_offset & (alignment - 1)) { ++ ret = i915_gem_object_unbind(obj); ++ if (ret) ++ return ret; ++ } ++ } ++ + if (obj_priv->gtt_space == NULL) { + ret = i915_gem_object_bind_to_gtt(obj, alignment); + if (ret) +From cf22f20ade30f8c03955324aaf27b1049e182600 Mon Sep 17 00:00:00 2001 +From: Dave Airlie +Date: Sat, 29 May 2010 06:50:37 +1000 +Subject: drm/radeon: fix the r100/r200 ums block 0 page fix + +From: Dave Airlie + +commit cf22f20ade30f8c03955324aaf27b1049e182600 upstream. + +airlied -> brown paper bag. + +I blame Hi-5 or the Wiggles for lowering my IQ, move the fix inside some +brackets instead of breaking everything in site. 
+ +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/radeon/radeon_state.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/drivers/gpu/drm/radeon/radeon_state.c ++++ b/drivers/gpu/drm/radeon/radeon_state.c +@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(str + flags |= RADEON_FRONT; + } + if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { +- if (!dev_priv->have_z_offset) ++ if (!dev_priv->have_z_offset) { + printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); +- flags &= ~(RADEON_DEPTH | RADEON_STENCIL); ++ flags &= ~(RADEON_DEPTH | RADEON_STENCIL); ++ } + } + + if (flags & (RADEON_FRONT | RADEON_BACK)) { +From 10b06122afcc78468bd1d009633cb71e528acdc5 Mon Sep 17 00:00:00 2001 +From: Jerome Glisse +Date: Fri, 21 May 2010 18:48:54 +0200 +Subject: drm/radeon/kms: release AGP bridge at suspend + +From: Jerome Glisse + +commit 10b06122afcc78468bd1d009633cb71e528acdc5 upstream. + +I think it's good to release the AGP bridge at suspend +and reacquire it at resume. 
Also fix : +https://bugzilla.kernel.org/show_bug.cgi?id=15969 + +Signed-off-by: Jerome Glisse +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/radeon/radeon.h | 1 + + drivers/gpu/drm/radeon/radeon_agp.c | 5 +++++ + drivers/gpu/drm/radeon/radeon_device.c | 2 ++ + 3 files changed, 8 insertions(+) + +--- a/drivers/gpu/drm/radeon/radeon.h ++++ b/drivers/gpu/drm/radeon/radeon.h +@@ -566,6 +566,7 @@ typedef int (*radeon_packet3_check_t)(st + */ + int radeon_agp_init(struct radeon_device *rdev); + void radeon_agp_resume(struct radeon_device *rdev); ++void radeon_agp_suspend(struct radeon_device *rdev); + void radeon_agp_fini(struct radeon_device *rdev); + + +--- a/drivers/gpu/drm/radeon/radeon_agp.c ++++ b/drivers/gpu/drm/radeon/radeon_agp.c +@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_devic + } + #endif + } ++ ++void radeon_agp_suspend(struct radeon_device *rdev) ++{ ++ radeon_agp_fini(rdev); ++} +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -748,6 +748,8 @@ int radeon_suspend_kms(struct drm_device + /* evict remaining vram memory */ + radeon_bo_evict_vram(rdev); + ++ radeon_agp_suspend(rdev); ++ + pci_save_state(dev->pdev); + if (state.event == PM_EVENT_SUSPEND) { + /* Shut down the device */ +From 1ff26a3604d0292988d4cade0e49ba9918dbfd46 Mon Sep 17 00:00:00 2001 +From: Alex Deucher +Date: Tue, 18 May 2010 00:23:15 -0400 +Subject: drm/radeon/kms/atom: fix typo in LVDS panel info parsing + +From: Alex Deucher + +commit 1ff26a3604d0292988d4cade0e49ba9918dbfd46 upstream. + +Fixes LVDS issues on some laptops; notably laptops with +2048x1536 panels. 
+ +Signed-off-by: Alex Deucher +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/radeon/radeon_atombios.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -1173,7 +1173,7 @@ struct radeon_encoder_atom_dig *radeon_a + lvds->native_mode.vtotal = lvds->native_mode.vdisplay + + le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); + lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + +- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); ++ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); + lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); + lvds->panel_pwr_delay = +From 2bfcc0fc698d550689ef020c73b2d977b73e728c Mon Sep 17 00:00:00 2001 +From: Alex Deucher +Date: Tue, 18 May 2010 19:26:46 -0400 +Subject: drm/radeon/kms: reset ddc_bus in object header parsing + +From: Alex Deucher + +commit 2bfcc0fc698d550689ef020c73b2d977b73e728c upstream. + +Some LVDS connectors don't have a ddc bus, so reset the +ddc bus to invalid before parsing the next connector +to avoid using stale ddc bus data. Should fix +fdo bug 28164. 
+ +Signed-off-by: Alex Deucher +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/radeon/radeon_atombios.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from + } + + /* look up gpio for ddc, hpd */ ++ ddc_bus.valid = false; ++ hpd.hpd = RADEON_HPD_NONE; + if ((le16_to_cpu(path->usDeviceTag) & + (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { + for (j = 0; j < con_obj->ucNumberOfObjects; j++) { +@@ -585,9 +587,6 @@ bool radeon_get_atom_connector_info_from + break; + } + } +- } else { +- hpd.hpd = RADEON_HPD_NONE; +- ddc_bus.valid = false; + } + + /* needed for aux chan transactions */ +From 61dd98fad58f945ed720ba132681acb58fcee015 Mon Sep 17 00:00:00 2001 +From: Adam Jackson +Date: Thu, 13 May 2010 14:55:28 -0400 +Subject: drm/edid: Fix 1024x768@85Hz + +From: Adam Jackson + +commit 61dd98fad58f945ed720ba132681acb58fcee015 upstream. + +Having hsync both start and end on pixel 1072 ain't gonna work very +well. Matches the X server's list. 
+ +Signed-off-by: Adam Jackson +Tested-By: Michael Tokarev +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/drm_edid.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -335,7 +335,7 @@ static struct drm_display_mode drm_dmt_m + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1024x768@85Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, +- 1072, 1376, 0, 768, 769, 772, 808, 0, ++ 1168, 1376, 0, 768, 769, 772, 808, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1152x864@75Hz */ + { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, +From 45737447ed160faaba036c0709226bf9057f7b72 Mon Sep 17 00:00:00 2001 +From: Alex Deucher +Date: Thu, 20 May 2010 11:26:11 -0400 +Subject: drm/radeon/kms: don't default display priority to high on rs4xx + +From: Alex Deucher + +commit 45737447ed160faaba036c0709226bf9057f7b72 upstream. + +Seems to cause issues with the sound hardware. Fixes kernel +bug 15982: +https://bugzilla.kernel.org/show_bug.cgi?id=15982 + +Signed-off-by: Alex Deucher +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/radeon/radeon_display.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -978,8 +978,11 @@ void radeon_update_display_priority(stru + /* set display priority to high for r3xx, rv515 chips + * this avoids flickering due to underflow to the + * display controllers during heavy acceleration. ++ * Don't force high on rs4xx igp chips as it seems to ++ * affect the sound card. See kernel bug 15982. 
+ */ +- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) ++ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && ++ !(rdev->flags & RADEON_IS_IGP)) + rdev->disp_priority = 2; + else + rdev->disp_priority = 0; +From 654fc6073f68efa3b6c466825749e73e7fbb92cd Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Thu, 27 May 2010 13:18:21 +0100 +Subject: drm/i915: Reject bind_to_gtt() early if object > aperture + +From: Chris Wilson + +commit 654fc6073f68efa3b6c466825749e73e7fbb92cd upstream. + +If the object is bigger than the entire aperture, reject it early +before evicting everything in a vain attempt to find space. + +v2: Use E2BIG as suggested by Owain G. Ainsworth. + +Signed-off-by: Chris Wilson +Signed-off-by: Eric Anholt +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/i915/i915_gem.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2688,6 +2688,14 @@ i915_gem_object_bind_to_gtt(struct drm_g + return -EINVAL; + } + ++ /* If the object is bigger than the entire aperture, reject it early ++ * before evicting everything in a vain attempt to find space. 
++ */ ++ if (obj->size > dev->gtt_total) { ++ DRM_ERROR("Attempting to bind an object larger than the aperture\n"); ++ return -E2BIG; ++ } ++ + search_free: + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, + obj->size, alignment, 0); diff --git a/linux-2.6-usb-uvc-autosuspend.diff b/linux-2.6-usb-uvc-autosuspend.diff new file mode 100644 index 000000000..b7c7f6e0f --- /dev/null +++ b/linux-2.6-usb-uvc-autosuspend.diff @@ -0,0 +1,19 @@ +commit 9d4c919bcfa794c054cc33155c7e3c53ac2c5684 +Author: Matthew Garrett +Date: Sun Jul 19 02:24:49 2009 +0100 + + Enable autosuspend on UVC by default + +diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c +index 89927b7..8de516b 100644 +--- a/drivers/media/video/uvc/uvc_driver.c ++++ b/drivers/media/video/uvc/uvc_driver.c +@@ -1647,6 +1647,8 @@ static int uvc_probe(struct usb_interface *intf, + "supported.\n", ret); + } + ++ usb_device_autosuspend_enable(udev); ++ + uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n"); + return 0; + diff --git a/linux-2.6-usb-wwan-update.patch b/linux-2.6-usb-wwan-update.patch new file mode 100644 index 000000000..fbb5ad073 --- /dev/null +++ b/linux-2.6-usb-wwan-update.patch @@ -0,0 +1,1637 @@ +diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig +index a0ecb42..71238de 100644 +--- a/drivers/usb/serial/Kconfig ++++ b/drivers/usb/serial/Kconfig +@@ -485,6 +485,7 @@ config USB_SERIAL_QCAUX + + config USB_SERIAL_QUALCOMM + tristate "USB Qualcomm Serial modem" ++ select USB_SERIAL_WWAN + help + Say Y here if you have a Qualcomm USB modem device. These are + usually wireless cellular modems. +@@ -576,8 +577,12 @@ config USB_SERIAL_XIRCOM + To compile this driver as a module, choose M here: the + module will be called keyspan_pda. 
+ ++config USB_SERIAL_WWAN ++ tristate ++ + config USB_SERIAL_OPTION + tristate "USB driver for GSM and CDMA modems" ++ select USB_SERIAL_WWAN + help + Say Y here if you have a GSM or CDMA modem that's connected to USB. + +diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile +index 83c9e43..7928cf4 100644 +--- a/drivers/usb/serial/Makefile ++++ b/drivers/usb/serial/Makefile +@@ -52,6 +52,7 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o + obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o + obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o + obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o ++obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o + obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o + obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o + obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 950cb31..10a9276 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -41,35 +41,14 @@ + #include + #include + #include ++#include "usb-wwan.h" + + /* Function prototypes */ + static int option_probe(struct usb_serial *serial, + const struct usb_device_id *id); +-static int option_open(struct tty_struct *tty, struct usb_serial_port *port); +-static void option_close(struct usb_serial_port *port); +-static void option_dtr_rts(struct usb_serial_port *port, int on); +- +-static int option_startup(struct usb_serial *serial); +-static void option_disconnect(struct usb_serial *serial); +-static void option_release(struct usb_serial *serial); +-static int option_write_room(struct tty_struct *tty); +- ++static int option_send_setup(struct usb_serial_port *port); + static void option_instat_callback(struct urb *urb); + +-static int option_write(struct tty_struct *tty, struct usb_serial_port *port, +- const unsigned char *buf, int count); +-static int option_chars_in_buffer(struct tty_struct *tty); +-static void option_set_termios(struct tty_struct *tty, +- struct 
usb_serial_port *port, struct ktermios *old); +-static int option_tiocmget(struct tty_struct *tty, struct file *file); +-static int option_tiocmset(struct tty_struct *tty, struct file *file, +- unsigned int set, unsigned int clear); +-static int option_send_setup(struct usb_serial_port *port); +-#ifdef CONFIG_PM +-static int option_suspend(struct usb_serial *serial, pm_message_t message); +-static int option_resume(struct usb_serial *serial); +-#endif +- + /* Vendor and product IDs */ + #define OPTION_VENDOR_ID 0x0AF0 + #define OPTION_PRODUCT_COLT 0x5000 +@@ -746,22 +725,22 @@ static struct usb_serial_driver option_1port_device = { + .id_table = option_ids, + .num_ports = 1, + .probe = option_probe, +- .open = option_open, +- .close = option_close, +- .dtr_rts = option_dtr_rts, +- .write = option_write, +- .write_room = option_write_room, +- .chars_in_buffer = option_chars_in_buffer, +- .set_termios = option_set_termios, +- .tiocmget = option_tiocmget, +- .tiocmset = option_tiocmset, +- .attach = option_startup, +- .disconnect = option_disconnect, +- .release = option_release, ++ .open = usb_wwan_open, ++ .close = usb_wwan_close, ++ .dtr_rts = usb_wwan_dtr_rts, ++ .write = usb_wwan_write, ++ .write_room = usb_wwan_write_room, ++ .chars_in_buffer = usb_wwan_chars_in_buffer, ++ .set_termios = usb_wwan_set_termios, ++ .tiocmget = usb_wwan_tiocmget, ++ .tiocmset = usb_wwan_tiocmset, ++ .attach = usb_wwan_startup, ++ .disconnect = usb_wwan_disconnect, ++ .release = usb_wwan_release, + .read_int_callback = option_instat_callback, + #ifdef CONFIG_PM +- .suspend = option_suspend, +- .resume = option_resume, ++ .suspend = usb_wwan_suspend, ++ .resume = usb_wwan_resume, + #endif + }; + +@@ -774,13 +753,6 @@ static int debug; + #define IN_BUFLEN 4096 + #define OUT_BUFLEN 4096 + +-struct option_intf_private { +- spinlock_t susp_lock; +- unsigned int suspended:1; +- int in_flight; +- struct option_blacklist_info *blacklist_info; +-}; +- + struct option_port_private { + /* Input 
endpoints and buffer for this port */ + struct urb *in_urbs[N_IN_URB]; +@@ -837,8 +809,7 @@ module_exit(option_exit); + static int option_probe(struct usb_serial *serial, + const struct usb_device_id *id) + { +- struct option_intf_private *data; +- ++ struct usb_wwan_intf_private *data; + /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */ + if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID && + serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 && +@@ -851,11 +822,13 @@ static int option_probe(struct usb_serial *serial, + serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) + return -ENODEV; + +- data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL); ++ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); ++ + if (!data) + return -ENOMEM; ++ data->send_setup = option_send_setup; + spin_lock_init(&data->susp_lock); +- data->blacklist_info = (struct option_blacklist_info*) id->driver_info; ++ data->private = (void *)id->driver_info; + return 0; + } + +@@ -876,194 +849,6 @@ static enum option_blacklist_reason is_blacklisted(const u8 ifnum, + return OPTION_BLACKLIST_NONE; + } + +-static void option_set_termios(struct tty_struct *tty, +- struct usb_serial_port *port, struct ktermios *old_termios) +-{ +- dbg("%s", __func__); +- /* Doesn't support option setting */ +- tty_termios_copy_hw(tty->termios, old_termios); +- option_send_setup(port); +-} +- +-static int option_tiocmget(struct tty_struct *tty, struct file *file) +-{ +- struct usb_serial_port *port = tty->driver_data; +- unsigned int value; +- struct option_port_private *portdata; +- +- portdata = usb_get_serial_port_data(port); +- +- value = ((portdata->rts_state) ? TIOCM_RTS : 0) | +- ((portdata->dtr_state) ? TIOCM_DTR : 0) | +- ((portdata->cts_state) ? TIOCM_CTS : 0) | +- ((portdata->dsr_state) ? TIOCM_DSR : 0) | +- ((portdata->dcd_state) ? TIOCM_CAR : 0) | +- ((portdata->ri_state) ? 
TIOCM_RNG : 0); +- +- return value; +-} +- +-static int option_tiocmset(struct tty_struct *tty, struct file *file, +- unsigned int set, unsigned int clear) +-{ +- struct usb_serial_port *port = tty->driver_data; +- struct option_port_private *portdata; +- +- portdata = usb_get_serial_port_data(port); +- +- /* FIXME: what locks portdata fields ? */ +- if (set & TIOCM_RTS) +- portdata->rts_state = 1; +- if (set & TIOCM_DTR) +- portdata->dtr_state = 1; +- +- if (clear & TIOCM_RTS) +- portdata->rts_state = 0; +- if (clear & TIOCM_DTR) +- portdata->dtr_state = 0; +- return option_send_setup(port); +-} +- +-/* Write */ +-static int option_write(struct tty_struct *tty, struct usb_serial_port *port, +- const unsigned char *buf, int count) +-{ +- struct option_port_private *portdata; +- struct option_intf_private *intfdata; +- int i; +- int left, todo; +- struct urb *this_urb = NULL; /* spurious */ +- int err; +- unsigned long flags; +- +- portdata = usb_get_serial_port_data(port); +- intfdata = port->serial->private; +- +- dbg("%s: write (%d chars)", __func__, count); +- +- i = 0; +- left = count; +- for (i = 0; left > 0 && i < N_OUT_URB; i++) { +- todo = left; +- if (todo > OUT_BUFLEN) +- todo = OUT_BUFLEN; +- +- this_urb = portdata->out_urbs[i]; +- if (test_and_set_bit(i, &portdata->out_busy)) { +- if (time_before(jiffies, +- portdata->tx_start_time[i] + 10 * HZ)) +- continue; +- usb_unlink_urb(this_urb); +- continue; +- } +- dbg("%s: endpoint %d buf %d", __func__, +- usb_pipeendpoint(this_urb->pipe), i); +- +- err = usb_autopm_get_interface_async(port->serial->interface); +- if (err < 0) +- break; +- +- /* send the data */ +- memcpy(this_urb->transfer_buffer, buf, todo); +- this_urb->transfer_buffer_length = todo; +- +- spin_lock_irqsave(&intfdata->susp_lock, flags); +- if (intfdata->suspended) { +- usb_anchor_urb(this_urb, &portdata->delayed); +- spin_unlock_irqrestore(&intfdata->susp_lock, flags); +- } else { +- intfdata->in_flight++; +- 
spin_unlock_irqrestore(&intfdata->susp_lock, flags); +- err = usb_submit_urb(this_urb, GFP_ATOMIC); +- if (err) { +- dbg("usb_submit_urb %p (write bulk) failed " +- "(%d)", this_urb, err); +- clear_bit(i, &portdata->out_busy); +- spin_lock_irqsave(&intfdata->susp_lock, flags); +- intfdata->in_flight--; +- spin_unlock_irqrestore(&intfdata->susp_lock, flags); +- continue; +- } +- } +- +- portdata->tx_start_time[i] = jiffies; +- buf += todo; +- left -= todo; +- } +- +- count -= left; +- dbg("%s: wrote (did %d)", __func__, count); +- return count; +-} +- +-static void option_indat_callback(struct urb *urb) +-{ +- int err; +- int endpoint; +- struct usb_serial_port *port; +- struct tty_struct *tty; +- unsigned char *data = urb->transfer_buffer; +- int status = urb->status; +- +- dbg("%s: %p", __func__, urb); +- +- endpoint = usb_pipeendpoint(urb->pipe); +- port = urb->context; +- +- if (status) { +- dbg("%s: nonzero status: %d on endpoint %02x.", +- __func__, status, endpoint); +- } else { +- tty = tty_port_tty_get(&port->port); +- if (urb->actual_length) { +- tty_insert_flip_string(tty, data, urb->actual_length); +- tty_flip_buffer_push(tty); +- } else +- dbg("%s: empty read urb received", __func__); +- tty_kref_put(tty); +- +- /* Resubmit urb so we continue receiving */ +- if (status != -ESHUTDOWN) { +- err = usb_submit_urb(urb, GFP_ATOMIC); +- if (err && err != -EPERM) +- printk(KERN_ERR "%s: resubmit read urb failed. 
" +- "(%d)", __func__, err); +- else +- usb_mark_last_busy(port->serial->dev); +- } +- +- } +- return; +-} +- +-static void option_outdat_callback(struct urb *urb) +-{ +- struct usb_serial_port *port; +- struct option_port_private *portdata; +- struct option_intf_private *intfdata; +- int i; +- +- dbg("%s", __func__); +- +- port = urb->context; +- intfdata = port->serial->private; +- +- usb_serial_port_softint(port); +- usb_autopm_put_interface_async(port->serial->interface); +- portdata = usb_get_serial_port_data(port); +- spin_lock(&intfdata->susp_lock); +- intfdata->in_flight--; +- spin_unlock(&intfdata->susp_lock); +- +- for (i = 0; i < N_OUT_URB; ++i) { +- if (portdata->out_urbs[i] == urb) { +- smp_mb__before_clear_bit(); +- clear_bit(i, &portdata->out_busy); +- break; +- } +- } +-} +- + static void option_instat_callback(struct urb *urb) + { + int err; +@@ -1120,183 +905,6 @@ static void option_instat_callback(struct urb *urb) + } + } + +-static int option_write_room(struct tty_struct *tty) +-{ +- struct usb_serial_port *port = tty->driver_data; +- struct option_port_private *portdata; +- int i; +- int data_len = 0; +- struct urb *this_urb; +- +- portdata = usb_get_serial_port_data(port); +- +- for (i = 0; i < N_OUT_URB; i++) { +- this_urb = portdata->out_urbs[i]; +- if (this_urb && !test_bit(i, &portdata->out_busy)) +- data_len += OUT_BUFLEN; +- } +- +- dbg("%s: %d", __func__, data_len); +- return data_len; +-} +- +-static int option_chars_in_buffer(struct tty_struct *tty) +-{ +- struct usb_serial_port *port = tty->driver_data; +- struct option_port_private *portdata; +- int i; +- int data_len = 0; +- struct urb *this_urb; +- +- portdata = usb_get_serial_port_data(port); +- +- for (i = 0; i < N_OUT_URB; i++) { +- this_urb = portdata->out_urbs[i]; +- /* FIXME: This locking is insufficient as this_urb may +- go unused during the test */ +- if (this_urb && test_bit(i, &portdata->out_busy)) +- data_len += this_urb->transfer_buffer_length; +- } +- dbg("%s: %d", 
__func__, data_len); +- return data_len; +-} +- +-static int option_open(struct tty_struct *tty, struct usb_serial_port *port) +-{ +- struct option_port_private *portdata; +- struct option_intf_private *intfdata; +- struct usb_serial *serial = port->serial; +- int i, err; +- struct urb *urb; +- +- portdata = usb_get_serial_port_data(port); +- intfdata = serial->private; +- +- dbg("%s", __func__); +- +- /* Start reading from the IN endpoint */ +- for (i = 0; i < N_IN_URB; i++) { +- urb = portdata->in_urbs[i]; +- if (!urb) +- continue; +- err = usb_submit_urb(urb, GFP_KERNEL); +- if (err) { +- dbg("%s: submit urb %d failed (%d) %d", +- __func__, i, err, +- urb->transfer_buffer_length); +- } +- } +- +- option_send_setup(port); +- +- serial->interface->needs_remote_wakeup = 1; +- spin_lock_irq(&intfdata->susp_lock); +- portdata->opened = 1; +- spin_unlock_irq(&intfdata->susp_lock); +- usb_autopm_put_interface(serial->interface); +- +- return 0; +-} +- +-static void option_dtr_rts(struct usb_serial_port *port, int on) +-{ +- struct usb_serial *serial = port->serial; +- struct option_port_private *portdata; +- +- dbg("%s", __func__); +- portdata = usb_get_serial_port_data(port); +- mutex_lock(&serial->disc_mutex); +- portdata->rts_state = on; +- portdata->dtr_state = on; +- if (serial->dev) +- option_send_setup(port); +- mutex_unlock(&serial->disc_mutex); +-} +- +- +-static void option_close(struct usb_serial_port *port) +-{ +- int i; +- struct usb_serial *serial = port->serial; +- struct option_port_private *portdata; +- struct option_intf_private *intfdata = port->serial->private; +- +- dbg("%s", __func__); +- portdata = usb_get_serial_port_data(port); +- +- if (serial->dev) { +- /* Stop reading/writing urbs */ +- spin_lock_irq(&intfdata->susp_lock); +- portdata->opened = 0; +- spin_unlock_irq(&intfdata->susp_lock); +- +- for (i = 0; i < N_IN_URB; i++) +- usb_kill_urb(portdata->in_urbs[i]); +- for (i = 0; i < N_OUT_URB; i++) +- usb_kill_urb(portdata->out_urbs[i]); +- 
usb_autopm_get_interface(serial->interface); +- serial->interface->needs_remote_wakeup = 0; +- } +-} +- +-/* Helper functions used by option_setup_urbs */ +-static struct urb *option_setup_urb(struct usb_serial *serial, int endpoint, +- int dir, void *ctx, char *buf, int len, +- void (*callback)(struct urb *)) +-{ +- struct urb *urb; +- +- if (endpoint == -1) +- return NULL; /* endpoint not needed */ +- +- urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ +- if (urb == NULL) { +- dbg("%s: alloc for endpoint %d failed.", __func__, endpoint); +- return NULL; +- } +- +- /* Fill URB using supplied data. */ +- usb_fill_bulk_urb(urb, serial->dev, +- usb_sndbulkpipe(serial->dev, endpoint) | dir, +- buf, len, callback, ctx); +- +- return urb; +-} +- +-/* Setup urbs */ +-static void option_setup_urbs(struct usb_serial *serial) +-{ +- int i, j; +- struct usb_serial_port *port; +- struct option_port_private *portdata; +- +- dbg("%s", __func__); +- +- for (i = 0; i < serial->num_ports; i++) { +- port = serial->port[i]; +- portdata = usb_get_serial_port_data(port); +- +- /* Do indat endpoints first */ +- for (j = 0; j < N_IN_URB; ++j) { +- portdata->in_urbs[j] = option_setup_urb(serial, +- port->bulk_in_endpointAddress, +- USB_DIR_IN, port, +- portdata->in_buffer[j], +- IN_BUFLEN, option_indat_callback); +- } +- +- /* outdat endpoints */ +- for (j = 0; j < N_OUT_URB; ++j) { +- portdata->out_urbs[j] = option_setup_urb(serial, +- port->bulk_out_endpointAddress, +- USB_DIR_OUT, port, +- portdata->out_buffer[j], +- OUT_BUFLEN, option_outdat_callback); +- } +- } +-} +- +- + /** send RTS/DTR state to the port. 
+ * + * This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN +@@ -1305,15 +913,16 @@ static void option_setup_urbs(struct usb_serial *serial) + static int option_send_setup(struct usb_serial_port *port) + { + struct usb_serial *serial = port->serial; +- struct option_intf_private *intfdata = +- (struct option_intf_private *) serial->private; ++ struct usb_wwan_intf_private *intfdata = ++ (struct usb_wwan_intf_private *) serial->private; + struct option_port_private *portdata; + int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; + int val = 0; + dbg("%s", __func__); + +- if (is_blacklisted(ifNum, intfdata->blacklist_info) == +- OPTION_BLACKLIST_SENDSETUP) { ++ if (is_blacklisted(ifNum, ++ (struct option_blacklist_info *) intfdata->private) ++ == OPTION_BLACKLIST_SENDSETUP) { + dbg("No send_setup on blacklisted interface #%d\n", ifNum); + return -EIO; + } +@@ -1330,224 +939,6 @@ static int option_send_setup(struct usb_serial_port *port) + 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT); + } + +-static int option_startup(struct usb_serial *serial) +-{ +- int i, j, err; +- struct usb_serial_port *port; +- struct option_port_private *portdata; +- u8 *buffer; +- +- dbg("%s", __func__); +- +- /* Now setup per port private data */ +- for (i = 0; i < serial->num_ports; i++) { +- port = serial->port[i]; +- portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); +- if (!portdata) { +- dbg("%s: kmalloc for option_port_private (%d) failed!.", +- __func__, i); +- return 1; +- } +- init_usb_anchor(&portdata->delayed); +- +- for (j = 0; j < N_IN_URB; j++) { +- buffer = (u8 *)__get_free_page(GFP_KERNEL); +- if (!buffer) +- goto bail_out_error; +- portdata->in_buffer[j] = buffer; +- } +- +- for (j = 0; j < N_OUT_URB; j++) { +- buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); +- if (!buffer) +- goto bail_out_error2; +- portdata->out_buffer[j] = buffer; +- } +- +- usb_set_serial_port_data(port, portdata); +- +- if (!port->interrupt_in_urb) +- continue; +- err 
= usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); +- if (err) +- dbg("%s: submit irq_in urb failed %d", +- __func__, err); +- } +- option_setup_urbs(serial); +- return 0; +- +-bail_out_error2: +- for (j = 0; j < N_OUT_URB; j++) +- kfree(portdata->out_buffer[j]); +-bail_out_error: +- for (j = 0; j < N_IN_URB; j++) +- if (portdata->in_buffer[j]) +- free_page((unsigned long)portdata->in_buffer[j]); +- kfree(portdata); +- return 1; +-} +- +-static void stop_read_write_urbs(struct usb_serial *serial) +-{ +- int i, j; +- struct usb_serial_port *port; +- struct option_port_private *portdata; +- +- /* Stop reading/writing urbs */ +- for (i = 0; i < serial->num_ports; ++i) { +- port = serial->port[i]; +- portdata = usb_get_serial_port_data(port); +- for (j = 0; j < N_IN_URB; j++) +- usb_kill_urb(portdata->in_urbs[j]); +- for (j = 0; j < N_OUT_URB; j++) +- usb_kill_urb(portdata->out_urbs[j]); +- } +-} +- +-static void option_disconnect(struct usb_serial *serial) +-{ +- dbg("%s", __func__); +- +- stop_read_write_urbs(serial); +-} +- +-static void option_release(struct usb_serial *serial) +-{ +- int i, j; +- struct usb_serial_port *port; +- struct option_port_private *portdata; +- +- dbg("%s", __func__); +- +- /* Now free them */ +- for (i = 0; i < serial->num_ports; ++i) { +- port = serial->port[i]; +- portdata = usb_get_serial_port_data(port); +- +- for (j = 0; j < N_IN_URB; j++) { +- if (portdata->in_urbs[j]) { +- usb_free_urb(portdata->in_urbs[j]); +- free_page((unsigned long) +- portdata->in_buffer[j]); +- portdata->in_urbs[j] = NULL; +- } +- } +- for (j = 0; j < N_OUT_URB; j++) { +- if (portdata->out_urbs[j]) { +- usb_free_urb(portdata->out_urbs[j]); +- kfree(portdata->out_buffer[j]); +- portdata->out_urbs[j] = NULL; +- } +- } +- } +- +- /* Now free per port private data */ +- for (i = 0; i < serial->num_ports; i++) { +- port = serial->port[i]; +- kfree(usb_get_serial_port_data(port)); +- } +-} +- +-#ifdef CONFIG_PM +-static int option_suspend(struct usb_serial 
*serial, pm_message_t message) +-{ +- struct option_intf_private *intfdata = serial->private; +- int b; +- +- dbg("%s entered", __func__); +- +- if (message.event & PM_EVENT_AUTO) { +- spin_lock_irq(&intfdata->susp_lock); +- b = intfdata->in_flight; +- spin_unlock_irq(&intfdata->susp_lock); +- +- if (b) +- return -EBUSY; +- } +- +- spin_lock_irq(&intfdata->susp_lock); +- intfdata->suspended = 1; +- spin_unlock_irq(&intfdata->susp_lock); +- stop_read_write_urbs(serial); +- +- return 0; +-} +- +-static void play_delayed(struct usb_serial_port *port) +-{ +- struct option_intf_private *data; +- struct option_port_private *portdata; +- struct urb *urb; +- int err; +- +- portdata = usb_get_serial_port_data(port); +- data = port->serial->private; +- while ((urb = usb_get_from_anchor(&portdata->delayed))) { +- err = usb_submit_urb(urb, GFP_ATOMIC); +- if (!err) +- data->in_flight++; +- } +-} +- +-static int option_resume(struct usb_serial *serial) +-{ +- int i, j; +- struct usb_serial_port *port; +- struct option_intf_private *intfdata = serial->private; +- struct option_port_private *portdata; +- struct urb *urb; +- int err = 0; +- +- dbg("%s entered", __func__); +- /* get the interrupt URBs resubmitted unconditionally */ +- for (i = 0; i < serial->num_ports; i++) { +- port = serial->port[i]; +- if (!port->interrupt_in_urb) { +- dbg("%s: No interrupt URB for port %d", __func__, i); +- continue; +- } +- err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); +- dbg("Submitted interrupt URB for port %d (result %d)", i, err); +- if (err < 0) { +- err("%s: Error %d for interrupt URB of port%d", +- __func__, err, i); +- goto err_out; +- } +- } +- +- for (i = 0; i < serial->num_ports; i++) { +- /* walk all ports */ +- port = serial->port[i]; +- portdata = usb_get_serial_port_data(port); +- +- /* skip closed ports */ +- spin_lock_irq(&intfdata->susp_lock); +- if (!portdata->opened) { +- spin_unlock_irq(&intfdata->susp_lock); +- continue; +- } +- +- for (j = 0; j < N_IN_URB; j++) 
{ +- urb = portdata->in_urbs[j]; +- err = usb_submit_urb(urb, GFP_ATOMIC); +- if (err < 0) { +- err("%s: Error %d for bulk URB %d", +- __func__, err, i); +- spin_unlock_irq(&intfdata->susp_lock); +- goto err_out; +- } +- } +- play_delayed(port); +- spin_unlock_irq(&intfdata->susp_lock); +- } +- spin_lock_irq(&intfdata->susp_lock); +- intfdata->suspended = 0; +- spin_unlock_irq(&intfdata->susp_lock); +-err_out: +- return err; +-} +-#endif +- + MODULE_AUTHOR(DRIVER_AUTHOR); + MODULE_DESCRIPTION(DRIVER_DESC); + MODULE_VERSION(DRIVER_VERSION); +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 53a2d5a..9e55ef5 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include "usb-wwan.h" + + #define DRIVER_AUTHOR "Qualcomm Inc" + #define DRIVER_DESC "Qualcomm USB Serial driver" +@@ -76,6 +77,8 @@ static const struct usb_device_id id_table[] = { + {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ + {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ ++ {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ ++ {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ + { } /* Terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, id_table); +@@ -92,6 +95,8 @@ static struct usb_driver qcdriver = { + + static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) + { ++ struct usb_wwan_intf_private *data; ++ struct usb_host_interface *intf = serial->interface->cur_altsetting; + int retval = -ENODEV; + __u8 nintf; + __u8 ifnum; +@@ -100,33 +105,45 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) + + nintf = serial->dev->actconfig->desc.bNumInterfaces; + dbg("Num Interfaces = %d", nintf); +- ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; ++ ifnum = 
intf->desc.bInterfaceNumber; + dbg("This Interface = %d", ifnum); + ++ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), ++ GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ ++ spin_lock_init(&data->susp_lock); ++ + switch (nintf) { + case 1: + /* QDL mode */ +- if (serial->interface->num_altsetting == 2) { +- struct usb_host_interface *intf; +- ++ /* Gobi 2000 has a single altsetting, older ones have two */ ++ if (serial->interface->num_altsetting == 2) + intf = &serial->interface->altsetting[1]; +- if (intf->desc.bNumEndpoints == 2) { +- if (usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) && +- usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { +- dbg("QDL port found"); +- retval = usb_set_interface(serial->dev, ifnum, 1); +- if (retval < 0) { +- dev_err(&serial->dev->dev, +- "Could not set interface, error %d\n", +- retval); +- retval = -ENODEV; +- } +- return retval; +- } ++ else if (serial->interface->num_altsetting > 2) ++ break; ++ ++ if (intf->desc.bNumEndpoints == 2 && ++ usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) && ++ usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { ++ dbg("QDL port found"); ++ ++ if (serial->interface->num_altsetting == 1) ++ return 0; ++ ++ retval = usb_set_interface(serial->dev, ifnum, 1); ++ if (retval < 0) { ++ dev_err(&serial->dev->dev, ++ "Could not set interface, error %d\n", ++ retval); ++ retval = -ENODEV; + } ++ return retval; + } + break; + ++ case 3: + case 4: + /* Composite mode */ + if (ifnum == 2) { +@@ -161,6 +178,18 @@ static struct usb_serial_driver qcdevice = { + .usb_driver = &qcdriver, + .num_ports = 1, + .probe = qcprobe, ++ .open = usb_wwan_open, ++ .close = usb_wwan_close, ++ .write = usb_wwan_write, ++ .write_room = usb_wwan_write_room, ++ .chars_in_buffer = usb_wwan_chars_in_buffer, ++ .attach = usb_wwan_startup, ++ .disconnect = usb_wwan_disconnect, ++ .release = usb_wwan_release, ++#ifdef CONFIG_PM ++ .suspend = usb_wwan_suspend, ++ .resume = usb_wwan_resume, ++#endif + }; + + 
static int __init qcinit(void) +diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h +new file mode 100644 +index 0000000..2be298a +--- /dev/null ++++ b/drivers/usb/serial/usb-wwan.h +@@ -0,0 +1,67 @@ ++/* ++ * Definitions for USB serial mobile broadband cards ++ */ ++ ++#ifndef __LINUX_USB_USB_WWAN ++#define __LINUX_USB_USB_WWAN ++ ++extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on); ++extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port); ++extern void usb_wwan_close(struct usb_serial_port *port); ++extern int usb_wwan_startup(struct usb_serial *serial); ++extern void usb_wwan_disconnect(struct usb_serial *serial); ++extern void usb_wwan_release(struct usb_serial *serial); ++extern int usb_wwan_write_room(struct tty_struct *tty); ++extern void usb_wwan_set_termios(struct tty_struct *tty, ++ struct usb_serial_port *port, ++ struct ktermios *old); ++extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file); ++extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file, ++ unsigned int set, unsigned int clear); ++extern int usb_wwan_send_setup(struct usb_serial_port *port); ++extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, ++ const unsigned char *buf, int count); ++extern int usb_wwan_chars_in_buffer(struct tty_struct *tty); ++#ifdef CONFIG_PM ++extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message); ++extern int usb_wwan_resume(struct usb_serial *serial); ++#endif ++ ++/* per port private data */ ++ ++#define N_IN_URB 4 ++#define N_OUT_URB 4 ++#define IN_BUFLEN 4096 ++#define OUT_BUFLEN 4096 ++ ++struct usb_wwan_intf_private { ++ spinlock_t susp_lock; ++ unsigned int suspended:1; ++ int in_flight; ++ int (*send_setup) (struct usb_serial_port *port); ++ void *private; ++}; ++ ++struct usb_wwan_port_private { ++ /* Input endpoints and buffer for this port */ ++ struct urb *in_urbs[N_IN_URB]; ++ u8 *in_buffer[N_IN_URB]; ++ /* 
Output endpoints and buffer for this port */ ++ struct urb *out_urbs[N_OUT_URB]; ++ u8 *out_buffer[N_OUT_URB]; ++ unsigned long out_busy; /* Bit vector of URBs in use */ ++ int opened; ++ struct usb_anchor delayed; ++ ++ /* Settings for the port */ ++ int rts_state; /* Handshaking pins (outputs) */ ++ int dtr_state; ++ int cts_state; /* Handshaking pins (inputs) */ ++ int dsr_state; ++ int dcd_state; ++ int ri_state; ++ ++ unsigned long tx_start_time[N_OUT_URB]; ++}; ++ ++#endif /* __LINUX_USB_USB_WWAN */ +diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c +new file mode 100644 +index 0000000..1ccf852 +--- /dev/null ++++ b/drivers/usb/serial/usb_wwan.c +@@ -0,0 +1,665 @@ ++/* ++ USB Driver layer for GSM modems ++ ++ Copyright (C) 2005 Matthias Urlichs ++ ++ This driver is free software; you can redistribute it and/or modify ++ it under the terms of Version 2 of the GNU General Public License as ++ published by the Free Software Foundation. ++ ++ Portions copied from the Keyspan driver by Hugh Blemings ++ ++ History: see the git log. ++ ++ Work sponsored by: Sigos GmbH, Germany ++ ++ This driver exists because the "normal" serial driver doesn't work too well ++ with GSM modems. 
Issues: ++ - data loss -- one single Receive URB is not nearly enough ++ - controlling the baud rate doesn't make sense ++*/ ++ ++#define DRIVER_VERSION "v0.7.2" ++#define DRIVER_AUTHOR "Matthias Urlichs " ++#define DRIVER_DESC "USB Driver for GSM modems" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "usb-wwan.h" ++ ++static int debug; ++ ++void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) ++{ ++ struct usb_serial *serial = port->serial; ++ struct usb_wwan_port_private *portdata; ++ ++ struct usb_wwan_intf_private *intfdata; ++ ++ dbg("%s", __func__); ++ ++ intfdata = port->serial->private; ++ ++ if (!intfdata->send_setup) ++ return; ++ ++ portdata = usb_get_serial_port_data(port); ++ mutex_lock(&serial->disc_mutex); ++ portdata->rts_state = on; ++ portdata->dtr_state = on; ++ if (serial->dev) ++ intfdata->send_setup(port); ++ mutex_unlock(&serial->disc_mutex); ++} ++EXPORT_SYMBOL(usb_wwan_dtr_rts); ++ ++void usb_wwan_set_termios(struct tty_struct *tty, ++ struct usb_serial_port *port, ++ struct ktermios *old_termios) ++{ ++ struct usb_wwan_intf_private *intfdata = port->serial->private; ++ ++ dbg("%s", __func__); ++ ++ /* Doesn't support option setting */ ++ tty_termios_copy_hw(tty->termios, old_termios); ++ ++ if (intfdata->send_setup) ++ intfdata->send_setup(port); ++} ++EXPORT_SYMBOL(usb_wwan_set_termios); ++ ++int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file) ++{ ++ struct usb_serial_port *port = tty->driver_data; ++ unsigned int value; ++ struct usb_wwan_port_private *portdata; ++ ++ portdata = usb_get_serial_port_data(port); ++ ++ value = ((portdata->rts_state) ? TIOCM_RTS : 0) | ++ ((portdata->dtr_state) ? TIOCM_DTR : 0) | ++ ((portdata->cts_state) ? TIOCM_CTS : 0) | ++ ((portdata->dsr_state) ? TIOCM_DSR : 0) | ++ ((portdata->dcd_state) ? TIOCM_CAR : 0) | ++ ((portdata->ri_state) ? 
TIOCM_RNG : 0); ++ ++ return value; ++} ++EXPORT_SYMBOL(usb_wwan_tiocmget); ++ ++int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file, ++ unsigned int set, unsigned int clear) ++{ ++ struct usb_serial_port *port = tty->driver_data; ++ struct usb_wwan_port_private *portdata; ++ struct usb_wwan_intf_private *intfdata; ++ ++ portdata = usb_get_serial_port_data(port); ++ intfdata = port->serial->private; ++ ++ if (!intfdata->send_setup) ++ return -EINVAL; ++ ++ /* FIXME: what locks portdata fields ? */ ++ if (set & TIOCM_RTS) ++ portdata->rts_state = 1; ++ if (set & TIOCM_DTR) ++ portdata->dtr_state = 1; ++ ++ if (clear & TIOCM_RTS) ++ portdata->rts_state = 0; ++ if (clear & TIOCM_DTR) ++ portdata->dtr_state = 0; ++ return intfdata->send_setup(port); ++} ++EXPORT_SYMBOL(usb_wwan_tiocmset); ++ ++/* Write */ ++int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, ++ const unsigned char *buf, int count) ++{ ++ struct usb_wwan_port_private *portdata; ++ struct usb_wwan_intf_private *intfdata; ++ int i; ++ int left, todo; ++ struct urb *this_urb = NULL; /* spurious */ ++ int err; ++ unsigned long flags; ++ ++ portdata = usb_get_serial_port_data(port); ++ intfdata = port->serial->private; ++ ++ dbg("%s: write (%d chars)", __func__, count); ++ ++ i = 0; ++ left = count; ++ for (i = 0; left > 0 && i < N_OUT_URB; i++) { ++ todo = left; ++ if (todo > OUT_BUFLEN) ++ todo = OUT_BUFLEN; ++ ++ this_urb = portdata->out_urbs[i]; ++ if (test_and_set_bit(i, &portdata->out_busy)) { ++ if (time_before(jiffies, ++ portdata->tx_start_time[i] + 10 * HZ)) ++ continue; ++ usb_unlink_urb(this_urb); ++ continue; ++ } ++ dbg("%s: endpoint %d buf %d", __func__, ++ usb_pipeendpoint(this_urb->pipe), i); ++ ++ err = usb_autopm_get_interface_async(port->serial->interface); ++ if (err < 0) ++ break; ++ ++ /* send the data */ ++ memcpy(this_urb->transfer_buffer, buf, todo); ++ this_urb->transfer_buffer_length = todo; ++ ++ spin_lock_irqsave(&intfdata->susp_lock, flags); ++ 
if (intfdata->suspended) { ++ usb_anchor_urb(this_urb, &portdata->delayed); ++ spin_unlock_irqrestore(&intfdata->susp_lock, flags); ++ } else { ++ intfdata->in_flight++; ++ spin_unlock_irqrestore(&intfdata->susp_lock, flags); ++ err = usb_submit_urb(this_urb, GFP_ATOMIC); ++ if (err) { ++ dbg("usb_submit_urb %p (write bulk) failed " ++ "(%d)", this_urb, err); ++ clear_bit(i, &portdata->out_busy); ++ spin_lock_irqsave(&intfdata->susp_lock, flags); ++ intfdata->in_flight--; ++ spin_unlock_irqrestore(&intfdata->susp_lock, ++ flags); ++ continue; ++ } ++ } ++ ++ portdata->tx_start_time[i] = jiffies; ++ buf += todo; ++ left -= todo; ++ } ++ ++ count -= left; ++ dbg("%s: wrote (did %d)", __func__, count); ++ return count; ++} ++EXPORT_SYMBOL(usb_wwan_write); ++ ++static void usb_wwan_indat_callback(struct urb *urb) ++{ ++ int err; ++ int endpoint; ++ struct usb_serial_port *port; ++ struct tty_struct *tty; ++ unsigned char *data = urb->transfer_buffer; ++ int status = urb->status; ++ ++ dbg("%s: %p", __func__, urb); ++ ++ endpoint = usb_pipeendpoint(urb->pipe); ++ port = urb->context; ++ ++ if (status) { ++ dbg("%s: nonzero status: %d on endpoint %02x.", ++ __func__, status, endpoint); ++ } else { ++ tty = tty_port_tty_get(&port->port); ++ if (urb->actual_length) { ++ tty_insert_flip_string(tty, data, urb->actual_length); ++ tty_flip_buffer_push(tty); ++ } else ++ dbg("%s: empty read urb received", __func__); ++ tty_kref_put(tty); ++ ++ /* Resubmit urb so we continue receiving */ ++ if (status != -ESHUTDOWN) { ++ err = usb_submit_urb(urb, GFP_ATOMIC); ++ if (err && err != -EPERM) ++ printk(KERN_ERR "%s: resubmit read urb failed. 
" ++ "(%d)", __func__, err); ++ else ++ usb_mark_last_busy(port->serial->dev); ++ } ++ ++ } ++ return; ++} ++ ++static void usb_wwan_outdat_callback(struct urb *urb) ++{ ++ struct usb_serial_port *port; ++ struct usb_wwan_port_private *portdata; ++ struct usb_wwan_intf_private *intfdata; ++ int i; ++ ++ dbg("%s", __func__); ++ ++ port = urb->context; ++ intfdata = port->serial->private; ++ ++ usb_serial_port_softint(port); ++ usb_autopm_put_interface_async(port->serial->interface); ++ portdata = usb_get_serial_port_data(port); ++ spin_lock(&intfdata->susp_lock); ++ intfdata->in_flight--; ++ spin_unlock(&intfdata->susp_lock); ++ ++ for (i = 0; i < N_OUT_URB; ++i) { ++ if (portdata->out_urbs[i] == urb) { ++ smp_mb__before_clear_bit(); ++ clear_bit(i, &portdata->out_busy); ++ break; ++ } ++ } ++} ++ ++int usb_wwan_write_room(struct tty_struct *tty) ++{ ++ struct usb_serial_port *port = tty->driver_data; ++ struct usb_wwan_port_private *portdata; ++ int i; ++ int data_len = 0; ++ struct urb *this_urb; ++ ++ portdata = usb_get_serial_port_data(port); ++ ++ for (i = 0; i < N_OUT_URB; i++) { ++ this_urb = portdata->out_urbs[i]; ++ if (this_urb && !test_bit(i, &portdata->out_busy)) ++ data_len += OUT_BUFLEN; ++ } ++ ++ dbg("%s: %d", __func__, data_len); ++ return data_len; ++} ++EXPORT_SYMBOL(usb_wwan_write_room); ++ ++int usb_wwan_chars_in_buffer(struct tty_struct *tty) ++{ ++ struct usb_serial_port *port = tty->driver_data; ++ struct usb_wwan_port_private *portdata; ++ int i; ++ int data_len = 0; ++ struct urb *this_urb; ++ ++ portdata = usb_get_serial_port_data(port); ++ ++ for (i = 0; i < N_OUT_URB; i++) { ++ this_urb = portdata->out_urbs[i]; ++ /* FIXME: This locking is insufficient as this_urb may ++ go unused during the test */ ++ if (this_urb && test_bit(i, &portdata->out_busy)) ++ data_len += this_urb->transfer_buffer_length; ++ } ++ dbg("%s: %d", __func__, data_len); ++ return data_len; ++} ++EXPORT_SYMBOL(usb_wwan_chars_in_buffer); ++ ++int usb_wwan_open(struct 
tty_struct *tty, struct usb_serial_port *port) ++{ ++ struct usb_wwan_port_private *portdata; ++ struct usb_wwan_intf_private *intfdata; ++ struct usb_serial *serial = port->serial; ++ int i, err; ++ struct urb *urb; ++ ++ portdata = usb_get_serial_port_data(port); ++ intfdata = serial->private; ++ ++ dbg("%s", __func__); ++ ++ /* Start reading from the IN endpoint */ ++ for (i = 0; i < N_IN_URB; i++) { ++ urb = portdata->in_urbs[i]; ++ if (!urb) ++ continue; ++ err = usb_submit_urb(urb, GFP_KERNEL); ++ if (err) { ++ dbg("%s: submit urb %d failed (%d) %d", ++ __func__, i, err, urb->transfer_buffer_length); ++ } ++ } ++ ++ if (intfdata->send_setup) ++ intfdata->send_setup(port); ++ ++ serial->interface->needs_remote_wakeup = 1; ++ spin_lock_irq(&intfdata->susp_lock); ++ portdata->opened = 1; ++ spin_unlock_irq(&intfdata->susp_lock); ++ usb_autopm_put_interface(serial->interface); ++ ++ return 0; ++} ++EXPORT_SYMBOL(usb_wwan_open); ++ ++void usb_wwan_close(struct usb_serial_port *port) ++{ ++ int i; ++ struct usb_serial *serial = port->serial; ++ struct usb_wwan_port_private *portdata; ++ struct usb_wwan_intf_private *intfdata = port->serial->private; ++ ++ dbg("%s", __func__); ++ portdata = usb_get_serial_port_data(port); ++ ++ if (serial->dev) { ++ /* Stop reading/writing urbs */ ++ spin_lock_irq(&intfdata->susp_lock); ++ portdata->opened = 0; ++ spin_unlock_irq(&intfdata->susp_lock); ++ ++ for (i = 0; i < N_IN_URB; i++) ++ usb_kill_urb(portdata->in_urbs[i]); ++ for (i = 0; i < N_OUT_URB; i++) ++ usb_kill_urb(portdata->out_urbs[i]); ++ usb_autopm_get_interface(serial->interface); ++ serial->interface->needs_remote_wakeup = 0; ++ } ++} ++EXPORT_SYMBOL(usb_wwan_close); ++ ++/* Helper functions used by usb_wwan_setup_urbs */ ++static struct urb *usb_wwan_setup_urb(struct usb_serial *serial, int endpoint, ++ int dir, void *ctx, char *buf, int len, ++ void (*callback) (struct urb *)) ++{ ++ struct urb *urb; ++ ++ if (endpoint == -1) ++ return NULL; /* endpoint not 
needed */ ++ ++ urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ ++ if (urb == NULL) { ++ dbg("%s: alloc for endpoint %d failed.", __func__, endpoint); ++ return NULL; ++ } ++ ++ /* Fill URB using supplied data. */ ++ usb_fill_bulk_urb(urb, serial->dev, ++ usb_sndbulkpipe(serial->dev, endpoint) | dir, ++ buf, len, callback, ctx); ++ ++ return urb; ++} ++ ++/* Setup urbs */ ++static void usb_wwan_setup_urbs(struct usb_serial *serial) ++{ ++ int i, j; ++ struct usb_serial_port *port; ++ struct usb_wwan_port_private *portdata; ++ ++ dbg("%s", __func__); ++ ++ for (i = 0; i < serial->num_ports; i++) { ++ port = serial->port[i]; ++ portdata = usb_get_serial_port_data(port); ++ ++ /* Do indat endpoints first */ ++ for (j = 0; j < N_IN_URB; ++j) { ++ portdata->in_urbs[j] = usb_wwan_setup_urb(serial, ++ port-> ++ bulk_in_endpointAddress, ++ USB_DIR_IN, ++ port, ++ portdata-> ++ in_buffer[j], ++ IN_BUFLEN, ++ usb_wwan_indat_callback); ++ } ++ ++ /* outdat endpoints */ ++ for (j = 0; j < N_OUT_URB; ++j) { ++ portdata->out_urbs[j] = usb_wwan_setup_urb(serial, ++ port-> ++ bulk_out_endpointAddress, ++ USB_DIR_OUT, ++ port, ++ portdata-> ++ out_buffer ++ [j], ++ OUT_BUFLEN, ++ usb_wwan_outdat_callback); ++ } ++ } ++} ++ ++int usb_wwan_startup(struct usb_serial *serial) ++{ ++ int i, j, err; ++ struct usb_serial_port *port; ++ struct usb_wwan_port_private *portdata; ++ u8 *buffer; ++ ++ dbg("%s", __func__); ++ ++ /* Now setup per port private data */ ++ for (i = 0; i < serial->num_ports; i++) { ++ port = serial->port[i]; ++ portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); ++ if (!portdata) { ++ dbg("%s: kmalloc for usb_wwan_port_private (%d) failed!.", ++ __func__, i); ++ return 1; ++ } ++ init_usb_anchor(&portdata->delayed); ++ ++ for (j = 0; j < N_IN_URB; j++) { ++ buffer = (u8 *) __get_free_page(GFP_KERNEL); ++ if (!buffer) ++ goto bail_out_error; ++ portdata->in_buffer[j] = buffer; ++ } ++ ++ for (j = 0; j < N_OUT_URB; j++) { ++ buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); ++ 
if (!buffer) ++ goto bail_out_error2; ++ portdata->out_buffer[j] = buffer; ++ } ++ ++ usb_set_serial_port_data(port, portdata); ++ ++ if (!port->interrupt_in_urb) ++ continue; ++ err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); ++ if (err) ++ dbg("%s: submit irq_in urb failed %d", __func__, err); ++ } ++ usb_wwan_setup_urbs(serial); ++ return 0; ++ ++bail_out_error2: ++ for (j = 0; j < N_OUT_URB; j++) ++ kfree(portdata->out_buffer[j]); ++bail_out_error: ++ for (j = 0; j < N_IN_URB; j++) ++ if (portdata->in_buffer[j]) ++ free_page((unsigned long)portdata->in_buffer[j]); ++ kfree(portdata); ++ return 1; ++} ++EXPORT_SYMBOL(usb_wwan_startup); ++ ++static void stop_read_write_urbs(struct usb_serial *serial) ++{ ++ int i, j; ++ struct usb_serial_port *port; ++ struct usb_wwan_port_private *portdata; ++ ++ /* Stop reading/writing urbs */ ++ for (i = 0; i < serial->num_ports; ++i) { ++ port = serial->port[i]; ++ portdata = usb_get_serial_port_data(port); ++ for (j = 0; j < N_IN_URB; j++) ++ usb_kill_urb(portdata->in_urbs[j]); ++ for (j = 0; j < N_OUT_URB; j++) ++ usb_kill_urb(portdata->out_urbs[j]); ++ } ++} ++ ++void usb_wwan_disconnect(struct usb_serial *serial) ++{ ++ dbg("%s", __func__); ++ ++ stop_read_write_urbs(serial); ++} ++EXPORT_SYMBOL(usb_wwan_disconnect); ++ ++void usb_wwan_release(struct usb_serial *serial) ++{ ++ int i, j; ++ struct usb_serial_port *port; ++ struct usb_wwan_port_private *portdata; ++ ++ dbg("%s", __func__); ++ ++ /* Now free them */ ++ for (i = 0; i < serial->num_ports; ++i) { ++ port = serial->port[i]; ++ portdata = usb_get_serial_port_data(port); ++ ++ for (j = 0; j < N_IN_URB; j++) { ++ usb_free_urb(portdata->in_urbs[j]); ++ free_page((unsigned long) ++ portdata->in_buffer[j]); ++ portdata->in_urbs[j] = NULL; ++ } ++ for (j = 0; j < N_OUT_URB; j++) { ++ usb_free_urb(portdata->out_urbs[j]); ++ kfree(portdata->out_buffer[j]); ++ portdata->out_urbs[j] = NULL; ++ } ++ } ++ ++ /* Now free per port private data */ ++ for (i = 0; i < 
serial->num_ports; i++) { ++ port = serial->port[i]; ++ kfree(usb_get_serial_port_data(port)); ++ } ++} ++EXPORT_SYMBOL(usb_wwan_release); ++ ++#ifdef CONFIG_PM ++int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) ++{ ++ struct usb_wwan_intf_private *intfdata = serial->private; ++ int b; ++ ++ dbg("%s entered", __func__); ++ ++ if (message.event & PM_EVENT_AUTO) { ++ spin_lock_irq(&intfdata->susp_lock); ++ b = intfdata->in_flight; ++ spin_unlock_irq(&intfdata->susp_lock); ++ ++ if (b) ++ return -EBUSY; ++ } ++ ++ spin_lock_irq(&intfdata->susp_lock); ++ intfdata->suspended = 1; ++ spin_unlock_irq(&intfdata->susp_lock); ++ stop_read_write_urbs(serial); ++ ++ return 0; ++} ++EXPORT_SYMBOL(usb_wwan_suspend); ++ ++static void play_delayed(struct usb_serial_port *port) ++{ ++ struct usb_wwan_intf_private *data; ++ struct usb_wwan_port_private *portdata; ++ struct urb *urb; ++ int err; ++ ++ portdata = usb_get_serial_port_data(port); ++ data = port->serial->private; ++ while ((urb = usb_get_from_anchor(&portdata->delayed))) { ++ err = usb_submit_urb(urb, GFP_ATOMIC); ++ if (!err) ++ data->in_flight++; ++ } ++} ++ ++int usb_wwan_resume(struct usb_serial *serial) ++{ ++ int i, j; ++ struct usb_serial_port *port; ++ struct usb_wwan_intf_private *intfdata = serial->private; ++ struct usb_wwan_port_private *portdata; ++ struct urb *urb; ++ int err = 0; ++ ++ dbg("%s entered", __func__); ++ /* get the interrupt URBs resubmitted unconditionally */ ++ for (i = 0; i < serial->num_ports; i++) { ++ port = serial->port[i]; ++ if (!port->interrupt_in_urb) { ++ dbg("%s: No interrupt URB for port %d", __func__, i); ++ continue; ++ } ++ err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); ++ dbg("Submitted interrupt URB for port %d (result %d)", i, err); ++ if (err < 0) { ++ err("%s: Error %d for interrupt URB of port%d", ++ __func__, err, i); ++ goto err_out; ++ } ++ } ++ ++ for (i = 0; i < serial->num_ports; i++) { ++ /* walk all ports */ ++ port = 
serial->port[i]; ++ portdata = usb_get_serial_port_data(port); ++ ++ /* skip closed ports */ ++ spin_lock_irq(&intfdata->susp_lock); ++ if (!portdata->opened) { ++ spin_unlock_irq(&intfdata->susp_lock); ++ continue; ++ } ++ ++ for (j = 0; j < N_IN_URB; j++) { ++ urb = portdata->in_urbs[j]; ++ err = usb_submit_urb(urb, GFP_ATOMIC); ++ if (err < 0) { ++ err("%s: Error %d for bulk URB %d", ++ __func__, err, i); ++ spin_unlock_irq(&intfdata->susp_lock); ++ goto err_out; ++ } ++ } ++ play_delayed(port); ++ spin_unlock_irq(&intfdata->susp_lock); ++ } ++ spin_lock_irq(&intfdata->susp_lock); ++ intfdata->suspended = 0; ++ spin_unlock_irq(&intfdata->susp_lock); ++err_out: ++ return err; ++} ++EXPORT_SYMBOL(usb_wwan_resume); ++#endif ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_VERSION(DRIVER_VERSION); ++MODULE_LICENSE("GPL"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Debug messages"); diff --git a/linux-2.6-utrace-ptrace.patch b/linux-2.6-utrace-ptrace.patch new file mode 100644 index 000000000..a3d36741d --- /dev/null +++ b/linux-2.6-utrace-ptrace.patch @@ -0,0 +1,1974 @@ +implement utrace-ptrace + +The patch adds the new file, kernel/ptrace-utrace.c, which contains +the new implementation of ptrace over utrace. + +This file is not compiled until we have CONFIG_UTRACE option, will be +added by the next "utrace core" patch. + +It's supposed to be an invisible implementation change, nothing should +change to userland when CONFIG_UTRACE is enabled. 
+ +Signed-off-by: Roland McGrath +Signed-off-by: Oleg Nesterov +--- + include/linux/ptrace.h | 2 +- + kernel/Makefile | 1 + + kernel/ptrace-utrace.c | 1127 ++++++++++++++++++++++++++++++++++++++++++++++++ + kernel/ptrace.c | 654 ++++++++++++++-------------- + kernel/utrace.c | 16 + + 5 files changed, 1466 insertions(+), 334 deletions(-) + +diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h +index 0d84f1e..102cb0f 100644 +--- a/include/linux/ptrace.h ++++ b/include/linux/ptrace.h +@@ -99,7 +99,7 @@ + #include /* For unlikely. */ + #include /* For struct task_struct. */ + +- ++extern void ptrace_notify_stop(struct task_struct *tracee); + extern long arch_ptrace(struct task_struct *child, long request, long addr, long data); + extern int ptrace_traceme(void); + extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); +diff --git a/kernel/Makefile b/kernel/Makefile +index 8bbb631..0cf7a15 100644 +--- a/kernel/Makefile ++++ b/kernel/Makefile +@@ -71,6 +71,7 @@ obj-$(CONFIG_RESOURCE_COUNTERS) += res_c + obj-$(CONFIG_STOP_MACHINE) += stop_machine.o + obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o + obj-$(CONFIG_UTRACE) += utrace.o ++obj-$(CONFIG_UTRACE) += ptrace-utrace.o + obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o + obj-$(CONFIG_AUDITSYSCALL) += auditsc.o + obj-$(CONFIG_GCOV_KERNEL) += gcov/ +diff --git a/kernel/ptrace-utrace.c b/kernel/ptrace-utrace.c +new file mode 100644 +index ...86234ee 100644 +--- /dev/null ++++ b/kernel/ptrace-utrace.c +@@ -0,0 +1,1127 @@ ++/* ++ * linux/kernel/ptrace.c ++ * ++ * (C) Copyright 1999 Linus Torvalds ++ * ++ * Common interfaces for "ptrace()" which we do not want ++ * to continually duplicate across every architecture. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * ptrace a task: make the debugger its new parent and ++ * move it to the ptrace list. ++ * ++ * Must be called with the tasklist lock write-held. ++ */ ++void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) ++{ ++ BUG_ON(!list_empty(&child->ptrace_entry)); ++ list_add(&child->ptrace_entry, &new_parent->ptraced); ++ child->parent = new_parent; ++} ++ ++/* ++ * unptrace a task: move it back to its original parent and ++ * remove it from the ptrace list. ++ * ++ * Must be called with the tasklist lock write-held. ++ */ ++void __ptrace_unlink(struct task_struct *child) ++{ ++ BUG_ON(!child->ptrace); ++ ++ child->ptrace = 0; ++ child->parent = child->real_parent; ++ list_del_init(&child->ptrace_entry); ++ ++ arch_ptrace_untrace(child); ++} ++ ++struct ptrace_context { ++ int options; ++ ++ int signr; ++ siginfo_t *siginfo; ++ ++ int stop_code; ++ unsigned long eventmsg; ++ ++ enum utrace_resume_action resume; ++}; ++ ++#define PT_UTRACED 0x00001000 ++ ++#define PTRACE_O_SYSEMU 0x100 ++ ++#define PTRACE_EVENT_SYSCALL (1 << 16) ++#define PTRACE_EVENT_SIGTRAP (2 << 16) ++#define PTRACE_EVENT_SIGNAL (3 << 16) ++/* events visible to user-space */ ++#define PTRACE_EVENT_MASK 0xFFFF ++ ++static inline bool ptrace_event_pending(struct ptrace_context *ctx) ++{ ++ return ctx->stop_code != 0; ++} ++ ++static inline int get_stop_event(struct ptrace_context *ctx) ++{ ++ return ctx->stop_code >> 8; ++} ++ ++static inline void set_stop_code(struct ptrace_context *ctx, int event) ++{ ++ ctx->stop_code = (event << 8) | SIGTRAP; ++} ++ ++static inline struct ptrace_context * ++ptrace_context(struct utrace_engine *engine) ++{ ++ return engine->data; ++} ++ ++static const struct utrace_engine_ops ptrace_utrace_ops; /* forward decl */ ++ ++static struct utrace_engine 
*ptrace_lookup_engine(struct task_struct *tracee) ++{ ++ return utrace_attach_task(tracee, UTRACE_ATTACH_MATCH_OPS, ++ &ptrace_utrace_ops, NULL); ++} ++ ++static struct utrace_engine * ++ptrace_reuse_engine(struct task_struct *tracee) ++{ ++ struct utrace_engine *engine; ++ struct ptrace_context *ctx; ++ int err = -EPERM; ++ ++ engine = ptrace_lookup_engine(tracee); ++ if (IS_ERR(engine)) ++ return engine; ++ ++ ctx = ptrace_context(engine); ++ if (unlikely(ctx->resume == UTRACE_DETACH)) { ++ /* ++ * Try to reuse this self-detaching engine. ++ * The only caller which can hit this case is ptrace_attach(), ++ * it holds ->cred_guard_mutex. ++ */ ++ ctx->options = 0; ++ ctx->eventmsg = 0; ++ ++ /* make sure we don't get unwanted reports */ ++ err = utrace_set_events(tracee, engine, UTRACE_EVENT(QUIESCE)); ++ if (!err || err == -EINPROGRESS) { ++ ctx->resume = UTRACE_RESUME; ++ /* synchronize with ptrace_report_signal() */ ++ err = utrace_barrier(tracee, engine); ++ } ++ WARN_ON(!err != (engine->ops == &ptrace_utrace_ops)); ++ ++ if (!err) ++ return engine; ++ } ++ ++ utrace_engine_put(engine); ++ return ERR_PTR(err); ++} ++ ++static struct utrace_engine * ++ptrace_attach_engine(struct task_struct *tracee) ++{ ++ struct utrace_engine *engine; ++ struct ptrace_context *ctx; ++ ++ if (unlikely(task_utrace_flags(tracee))) { ++ engine = ptrace_reuse_engine(tracee); ++ if (!IS_ERR(engine) || IS_ERR(engine) == -EPERM) ++ return engine; ++ } ++ ++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ++ if (unlikely(!ctx)) ++ return ERR_PTR(-ENOMEM); ++ ++ ctx->resume = UTRACE_RESUME; ++ ++ engine = utrace_attach_task(tracee, UTRACE_ATTACH_CREATE | ++ UTRACE_ATTACH_EXCLUSIVE | ++ UTRACE_ATTACH_MATCH_OPS, ++ &ptrace_utrace_ops, ctx); ++ if (unlikely(IS_ERR(engine))) { ++ if (engine != ERR_PTR(-ESRCH) && ++ engine != ERR_PTR(-ERESTARTNOINTR)) ++ engine = ERR_PTR(-EPERM); ++ kfree(ctx); ++ } ++ ++ return engine; ++} ++ ++static inline int ptrace_set_events(struct task_struct *target, ++ 
struct utrace_engine *engine, ++ unsigned long options) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ /* ++ * We need QUIESCE for resume handling, CLONE to check ++ * for CLONE_PTRACE, other events are always reported. ++ */ ++ unsigned long events = UTRACE_EVENT(QUIESCE) | UTRACE_EVENT(CLONE) | ++ UTRACE_EVENT(EXEC) | UTRACE_EVENT_SIGNAL_ALL; ++ ++ ctx->options = options; ++ if (options & PTRACE_O_TRACEEXIT) ++ events |= UTRACE_EVENT(EXIT); ++ ++ return utrace_set_events(target, engine, events); ++} ++ ++/* ++ * Attach a utrace engine for ptrace and set up its event mask. ++ * Returns error code or 0 on success. ++ */ ++static int ptrace_attach_task(struct task_struct *tracee, int options) ++{ ++ struct utrace_engine *engine; ++ int err; ++ ++ engine = ptrace_attach_engine(tracee); ++ if (IS_ERR(engine)) ++ return PTR_ERR(engine); ++ /* ++ * It can fail only if the tracee is dead, the caller ++ * must notice this before setting PT_UTRACED. ++ */ ++ err = ptrace_set_events(tracee, engine, options); ++ WARN_ON(err && !tracee->exit_state); ++ utrace_engine_put(engine); ++ return 0; ++} ++ ++static int ptrace_wake_up(struct task_struct *tracee, ++ struct utrace_engine *engine, ++ enum utrace_resume_action action, ++ bool force_wakeup) ++{ ++ if (force_wakeup) { ++ unsigned long flags; ++ /* ++ * Preserve the compatibility bug. Historically ptrace ++ * wakes up the tracee even if it should not. Clear ++ * SIGNAL_STOP_STOPPED for utrace_wakeup(). ++ */ ++ if (lock_task_sighand(tracee, &flags)) { ++ tracee->signal->flags &= ~SIGNAL_STOP_STOPPED; ++ unlock_task_sighand(tracee, &flags); ++ } ++ } ++ ++ if (action != UTRACE_REPORT) ++ ptrace_context(engine)->stop_code = 0; ++ ++ return utrace_control(tracee, engine, action); ++} ++ ++static void ptrace_detach_task(struct task_struct *tracee, int sig) ++{ ++ /* ++ * If true, the caller is PTRACE_DETACH, otherwise ++ * the tracer detaches implicitly during exit. 
++ */ ++ bool voluntary = (sig >= 0); ++ struct utrace_engine *engine = ptrace_lookup_engine(tracee); ++ enum utrace_resume_action action = UTRACE_DETACH; ++ ++ if (unlikely(IS_ERR(engine))) ++ return; ++ ++ if (sig) { ++ struct ptrace_context *ctx = ptrace_context(engine); ++ ++ switch (get_stop_event(ctx)) { ++ case PTRACE_EVENT_SYSCALL: ++ if (voluntary) ++ send_sig_info(sig, SEND_SIG_PRIV, tracee); ++ break; ++ ++ case PTRACE_EVENT_SIGNAL: ++ if (voluntary) ++ ctx->signr = sig; ++ ctx->resume = UTRACE_DETACH; ++ action = UTRACE_RESUME; ++ break; ++ } ++ } ++ ++ ptrace_wake_up(tracee, engine, action, voluntary); ++ utrace_engine_put(engine); ++} ++ ++static void ptrace_abort_attach(struct task_struct *tracee) ++{ ++ ptrace_detach_task(tracee, 0); ++} ++ ++static u32 ptrace_report_exit(u32 action, struct utrace_engine *engine, ++ long orig_code, long *code) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ ++ WARN_ON(ptrace_event_pending(ctx) && ++ !signal_group_exit(current->signal)); ++ ++ set_stop_code(ctx, PTRACE_EVENT_EXIT); ++ ctx->eventmsg = *code; ++ ++ return UTRACE_STOP; ++} ++ ++static void ptrace_clone_attach(struct task_struct *child, ++ int options) ++{ ++ struct task_struct *parent = current; ++ struct task_struct *tracer; ++ bool abort = true; ++ ++ if (unlikely(ptrace_attach_task(child, options))) { ++ WARN_ON(1); ++ return; ++ } ++ ++ write_lock_irq(&tasklist_lock); ++ tracer = parent->parent; ++ if (!(tracer->flags & PF_EXITING) && parent->ptrace) { ++ child->ptrace = parent->ptrace; ++ __ptrace_link(child, tracer); ++ abort = false; ++ } ++ write_unlock_irq(&tasklist_lock); ++ if (unlikely(abort)) { ++ ptrace_abort_attach(child); ++ return; ++ } ++ ++ sigaddset(&child->pending.signal, SIGSTOP); ++ set_tsk_thread_flag(child, TIF_SIGPENDING); ++} ++ ++static u32 ptrace_report_clone(u32 action, struct utrace_engine *engine, ++ unsigned long clone_flags, ++ struct task_struct *child) ++{ ++ struct ptrace_context *ctx = 
ptrace_context(engine); ++ int event = 0; ++ ++ WARN_ON(ptrace_event_pending(ctx)); ++ ++ if (clone_flags & CLONE_UNTRACED) { ++ /* no events reported */ ++ } else if (clone_flags & CLONE_VFORK) { ++ if (ctx->options & PTRACE_O_TRACEVFORK) ++ event = PTRACE_EVENT_VFORK; ++ else if (ctx->options & PTRACE_O_TRACEVFORKDONE) ++ event = PTRACE_EVENT_VFORK_DONE; ++ } else if ((clone_flags & CSIGNAL) != SIGCHLD) { ++ if (ctx->options & PTRACE_O_TRACECLONE) ++ event = PTRACE_EVENT_CLONE; ++ } else if (ctx->options & PTRACE_O_TRACEFORK) { ++ event = PTRACE_EVENT_FORK; ++ } ++ /* ++ * Any of these reports implies auto-attaching the new child. ++ * So does CLONE_PTRACE, even with no event to report. ++ */ ++ if ((event && event != PTRACE_EVENT_VFORK_DONE) || ++ (clone_flags & CLONE_PTRACE)) ++ ptrace_clone_attach(child, ctx->options); ++ ++ if (!event) ++ return UTRACE_RESUME; ++ ++ set_stop_code(ctx, event); ++ ctx->eventmsg = child->pid; ++ /* ++ * We shouldn't stop now, inside the do_fork() path. ++ * We will stop later, before return to user-mode. ++ */ ++ if (event == PTRACE_EVENT_VFORK_DONE) ++ return UTRACE_REPORT; ++ else ++ return UTRACE_STOP; ++} ++ ++static inline void set_syscall_code(struct ptrace_context *ctx) ++{ ++ set_stop_code(ctx, PTRACE_EVENT_SYSCALL); ++ if (ctx->options & PTRACE_O_TRACESYSGOOD) ++ ctx->stop_code |= 0x80; ++} ++ ++static u32 ptrace_report_syscall_entry(u32 action, struct utrace_engine *engine, ++ struct pt_regs *regs) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ ++ if (action & UTRACE_SYSCALL_RESUMED) { ++ /* ++ * We already reported the first time. ++ * Nothing more to do now. 
++ */ ++ if (unlikely(ctx->options & PTRACE_O_SYSEMU)) ++ return UTRACE_SYSCALL_ABORT | UTRACE_REPORT; ++ return utrace_syscall_action(action) | UTRACE_RESUME; ++ } ++ ++ WARN_ON(ptrace_event_pending(ctx)); ++ ++ set_syscall_code(ctx); ++ ++ if (unlikely(ctx->options & PTRACE_O_SYSEMU)) ++ return UTRACE_SYSCALL_ABORT | UTRACE_REPORT; ++ /* ++ * Stop now to report. We will get another callback after ++ * we resume, with the UTRACE_SYSCALL_RESUMED flag set. ++ */ ++ return UTRACE_SYSCALL_RUN | UTRACE_STOP; ++} ++ ++static u32 ptrace_report_syscall_exit(u32 action, struct utrace_engine *engine, ++ struct pt_regs *regs) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ ++ if (ptrace_event_pending(ctx)) ++ return UTRACE_STOP; ++ ++ if (ctx->resume != UTRACE_RESUME) { ++ WARN_ON(ctx->resume != UTRACE_BLOCKSTEP && ++ ctx->resume != UTRACE_SINGLESTEP); ++ ctx->resume = UTRACE_RESUME; ++ ++ ctx->signr = SIGTRAP; ++ return UTRACE_INTERRUPT; ++ } ++ ++ set_syscall_code(ctx); ++ return UTRACE_STOP; ++} ++ ++static u32 ptrace_report_exec(u32 action, struct utrace_engine *engine, ++ const struct linux_binfmt *fmt, ++ const struct linux_binprm *bprm, ++ struct pt_regs *regs) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ ++ WARN_ON(ptrace_event_pending(ctx)); ++ ++ if (!(ctx->options & PTRACE_O_TRACEEXEC)) { ++ /* ++ * Old-fashioned ptrace'd exec just posts a plain signal. ++ */ ++ send_sig(SIGTRAP, current, 0); ++ return UTRACE_RESUME; ++ } ++ ++ set_stop_code(ctx, PTRACE_EVENT_EXEC); ++ return UTRACE_STOP; ++} ++ ++static enum utrace_signal_action resume_signal(struct ptrace_context *ctx, ++ struct k_sigaction *return_ka) ++{ ++ siginfo_t *info = ctx->siginfo; ++ int signr = ctx->signr; ++ ++ ctx->siginfo = NULL; ++ ctx->signr = 0; ++ ++ /* Did the debugger cancel the sig? */ ++ if (!signr) ++ return UTRACE_SIGNAL_IGN; ++ /* ++ * Update the siginfo structure if the signal has changed. 
++ * If the debugger wanted something specific in the siginfo ++ * then it should have updated *info via PTRACE_SETSIGINFO. ++ */ ++ if (info->si_signo != signr) { ++ info->si_signo = signr; ++ info->si_errno = 0; ++ info->si_code = SI_USER; ++ info->si_pid = task_pid_vnr(current->parent); ++ info->si_uid = task_uid(current->parent); ++ } ++ ++ /* If the (new) signal is now blocked, requeue it. */ ++ if (sigismember(¤t->blocked, signr)) { ++ send_sig_info(signr, info, current); ++ return UTRACE_SIGNAL_IGN; ++ } ++ ++ spin_lock_irq(¤t->sighand->siglock); ++ *return_ka = current->sighand->action[signr - 1]; ++ spin_unlock_irq(¤t->sighand->siglock); ++ ++ return UTRACE_SIGNAL_DELIVER; ++} ++ ++static u32 ptrace_report_signal(u32 action, struct utrace_engine *engine, ++ struct pt_regs *regs, ++ siginfo_t *info, ++ const struct k_sigaction *orig_ka, ++ struct k_sigaction *return_ka) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ enum utrace_resume_action resume = ctx->resume; ++ ++ if (ptrace_event_pending(ctx)) { ++ action = utrace_signal_action(action); ++ WARN_ON(action != UTRACE_SIGNAL_REPORT); ++ return action | UTRACE_STOP; ++ } ++ ++ switch (utrace_signal_action(action)) { ++ case UTRACE_SIGNAL_HANDLER: ++ if (WARN_ON(ctx->siginfo)) ++ ctx->siginfo = NULL; ++ ++ if (resume != UTRACE_RESUME) { ++ WARN_ON(resume != UTRACE_BLOCKSTEP && ++ resume != UTRACE_SINGLESTEP); ++ ++ set_stop_code(ctx, PTRACE_EVENT_SIGTRAP); ++ return UTRACE_STOP | UTRACE_SIGNAL_IGN; ++ } ++ ++ case UTRACE_SIGNAL_REPORT: ++ if (!ctx->siginfo) { ++ if (ctx->signr) { ++ /* set by ptrace_resume(SYSCALL_EXIT) */ ++ WARN_ON(ctx->signr != SIGTRAP); ++ user_single_step_siginfo(current, regs, info); ++ force_sig_info(SIGTRAP, info, current); ++ } ++ ++ return resume | UTRACE_SIGNAL_IGN; ++ } ++ ++ if (WARN_ON(ctx->siginfo != info)) ++ return resume | UTRACE_SIGNAL_IGN; ++ ++ return resume | resume_signal(ctx, return_ka); ++ ++ default: ++ break; ++ } ++ ++ WARN_ON(ctx->siginfo); ++ 
ctx->siginfo = info; ++ /* ++ * ctx->siginfo points to the caller's stack. ++ * Make sure the subsequent UTRACE_SIGNAL_REPORT clears ++ * ->siginfo before return from get_signal_to_deliver(). ++ */ ++ if (utrace_control(current, engine, UTRACE_INTERRUPT)) ++ WARN_ON(1); ++ ++ ctx->signr = info->si_signo; ++ ctx->stop_code = (PTRACE_EVENT_SIGNAL << 8) | ctx->signr; ++ ++ return UTRACE_STOP | UTRACE_SIGNAL_IGN; ++} ++ ++static u32 ptrace_report_quiesce(u32 action, struct utrace_engine *engine, ++ unsigned long event) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ ++ if (ptrace_event_pending(ctx)) ++ return UTRACE_STOP; ++ ++ return event ? UTRACE_RESUME : ctx->resume; ++} ++ ++static void ptrace_release(void *data) ++{ ++ kfree(data); ++} ++ ++static const struct utrace_engine_ops ptrace_utrace_ops = { ++ .report_signal = ptrace_report_signal, ++ .report_quiesce = ptrace_report_quiesce, ++ .report_exec = ptrace_report_exec, ++ .report_exit = ptrace_report_exit, ++ .report_clone = ptrace_report_clone, ++ .report_syscall_entry = ptrace_report_syscall_entry, ++ .report_syscall_exit = ptrace_report_syscall_exit, ++ .release = ptrace_release, ++}; ++ ++int ptrace_check_attach(struct task_struct *child, int kill) ++{ ++ struct utrace_engine *engine; ++ struct utrace_examiner exam; ++ int ret = -ESRCH; ++ ++ engine = ptrace_lookup_engine(child); ++ if (IS_ERR(engine)) ++ return ret; ++ ++ if (child->parent != current) ++ goto out; ++ ++ if (unlikely(kill)) ++ ret = 0; ++ ++ if (!task_is_stopped_or_traced(child)) ++ goto out; ++ /* ++ * Make sure our engine has already stopped the child. ++ * Then wait for it to be off the CPU. 
++ */ ++ if (!utrace_control(child, engine, UTRACE_STOP) && ++ !utrace_prepare_examine(child, engine, &exam)) ++ ret = 0; ++out: ++ utrace_engine_put(engine); ++ return ret; ++} ++ ++int ptrace_attach(struct task_struct *task) ++{ ++ int retval; ++ ++ audit_ptrace(task); ++ ++ retval = -EPERM; ++ if (unlikely(task->flags & PF_KTHREAD)) ++ goto out; ++ if (same_thread_group(task, current)) ++ goto out; ++ ++ /* ++ * Protect exec's credential calculations against our interference; ++ * interference; SUID, SGID and LSM creds get determined differently ++ * under ptrace. ++ */ ++ retval = -ERESTARTNOINTR; ++ if (mutex_lock_interruptible(&task->cred_guard_mutex)) ++ goto out; ++ ++ task_lock(task); ++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); ++ task_unlock(task); ++ if (retval) ++ goto unlock_creds; ++ ++ retval = ptrace_attach_task(task, 0); ++ if (unlikely(retval)) ++ goto unlock_creds; ++ ++ write_lock_irq(&tasklist_lock); ++ retval = -EPERM; ++ if (unlikely(task->exit_state)) ++ goto unlock_tasklist; ++ ++ BUG_ON(task->ptrace); ++ task->ptrace = PT_UTRACED; ++ if (capable(CAP_SYS_PTRACE)) ++ task->ptrace |= PT_PTRACE_CAP; ++ ++ __ptrace_link(task, current); ++ send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); ++ ++ retval = 0; ++unlock_tasklist: ++ write_unlock_irq(&tasklist_lock); ++unlock_creds: ++ mutex_unlock(&task->cred_guard_mutex); ++out: ++ return retval; ++} ++ ++/* ++ * Performs checks and sets PT_UTRACED. ++ * Should be used by all ptrace implementations for PTRACE_TRACEME. ++ */ ++int ptrace_traceme(void) ++{ ++ bool detach = true; ++ int ret = ptrace_attach_task(current, 0); ++ ++ if (unlikely(ret)) ++ return ret; ++ ++ ret = -EPERM; ++ write_lock_irq(&tasklist_lock); ++ BUG_ON(current->ptrace); ++ ret = security_ptrace_traceme(current->parent); ++ /* ++ * Check PF_EXITING to ensure ->real_parent has not passed ++ * exit_ptrace(). Otherwise we don't report the error but ++ * pretend ->real_parent untraces us right after return. 
++ */ ++ if (!ret && !(current->real_parent->flags & PF_EXITING)) { ++ current->ptrace = PT_UTRACED; ++ __ptrace_link(current, current->real_parent); ++ detach = false; ++ } ++ write_unlock_irq(&tasklist_lock); ++ ++ if (detach) ++ ptrace_abort_attach(current); ++ return ret; ++} ++ ++static void ptrace_do_detach(struct task_struct *tracee, unsigned int data) ++{ ++ bool detach, release; ++ ++ write_lock_irq(&tasklist_lock); ++ /* ++ * This tracee can be already killed. Make sure de_thread() or ++ * our sub-thread doing do_wait() didn't do release_task() yet. ++ */ ++ detach = tracee->ptrace != 0; ++ release = false; ++ if (likely(detach)) ++ release = __ptrace_detach(current, tracee); ++ write_unlock_irq(&tasklist_lock); ++ ++ if (unlikely(release)) ++ release_task(tracee); ++ else if (likely(detach)) ++ ptrace_detach_task(tracee, data); ++} ++ ++int ptrace_detach(struct task_struct *child, unsigned int data) ++{ ++ if (!valid_signal(data)) ++ return -EIO; ++ ++ ptrace_do_detach(child, data); ++ ++ return 0; ++} ++ ++/* ++ * Detach all tasks we were using ptrace on. ++ */ ++void exit_ptrace(struct task_struct *tracer) ++{ ++ for (;;) { ++ struct task_struct *tracee = NULL; ++ ++ read_lock(&tasklist_lock); ++ if (!list_empty(&tracer->ptraced)) { ++ tracee = list_first_entry(&tracer->ptraced, ++ struct task_struct, ptrace_entry); ++ get_task_struct(tracee); ++ } ++ read_unlock(&tasklist_lock); ++ if (!tracee) ++ break; ++ ++ ptrace_do_detach(tracee, -1); ++ put_task_struct(tracee); ++ } ++} ++ ++static int ptrace_set_options(struct task_struct *tracee, ++ struct utrace_engine *engine, long data) ++{ ++ BUILD_BUG_ON(PTRACE_O_MASK & PTRACE_O_SYSEMU); ++ ++ ptrace_set_events(tracee, engine, data & PTRACE_O_MASK); ++ return (data & ~PTRACE_O_MASK) ? 
-EINVAL : 0; ++} ++ ++static int ptrace_rw_siginfo(struct task_struct *tracee, ++ struct ptrace_context *ctx, ++ siginfo_t *info, bool write) ++{ ++ unsigned long flags; ++ int err; ++ ++ switch (get_stop_event(ctx)) { ++ case 0: /* jctl stop */ ++ return -EINVAL; ++ ++ case PTRACE_EVENT_SIGNAL: ++ err = -ESRCH; ++ if (lock_task_sighand(tracee, &flags)) { ++ if (likely(task_is_traced(tracee))) { ++ if (write) ++ *ctx->siginfo = *info; ++ else ++ *info = *ctx->siginfo; ++ err = 0; ++ } ++ unlock_task_sighand(tracee, &flags); ++ } ++ ++ return err; ++ ++ default: ++ if (!write) { ++ memset(info, 0, sizeof(*info)); ++ info->si_signo = SIGTRAP; ++ info->si_code = ctx->stop_code & PTRACE_EVENT_MASK; ++ info->si_pid = task_pid_vnr(tracee); ++ info->si_uid = task_uid(tracee); ++ } ++ ++ return 0; ++ } ++} ++ ++static void do_ptrace_notify_stop(struct ptrace_context *ctx, ++ struct task_struct *tracee) ++{ ++ /* ++ * This can race with SIGKILL, but we borrow this race from ++ * the old ptrace implementation. ->exit_code is only needed ++ * for wait_task_stopped()->task_stopped_code(), we should ++ * change it to use ptrace_context. ++ */ ++ tracee->exit_code = ctx->stop_code & PTRACE_EVENT_MASK; ++ WARN_ON(!tracee->exit_code); ++ ++ read_lock(&tasklist_lock); ++ /* ++ * Don't want to allow preemption here, because ++ * sys_ptrace() needs this task to be inactive. ++ */ ++ preempt_disable(); ++ /* ++ * It can be killed and then released by our subthread, ++ * or ptrace_attach() has not completed yet. 
++ */ ++ if (task_ptrace(tracee)) ++ do_notify_parent_cldstop(tracee, CLD_TRAPPED); ++ read_unlock(&tasklist_lock); ++ preempt_enable_no_resched(); ++} ++ ++void ptrace_notify_stop(struct task_struct *tracee) ++{ ++ struct utrace_engine *engine = ptrace_lookup_engine(tracee); ++ ++ if (IS_ERR(engine)) ++ return; ++ ++ do_ptrace_notify_stop(ptrace_context(engine), tracee); ++ utrace_engine_put(engine); ++} ++ ++static int ptrace_resume_action(struct task_struct *tracee, ++ struct utrace_engine *engine, long request) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ unsigned long events; ++ int action; ++ ++ ctx->options &= ~PTRACE_O_SYSEMU; ++ events = engine->flags & ~UTRACE_EVENT_SYSCALL; ++ action = UTRACE_RESUME; ++ ++ switch (request) { ++#ifdef PTRACE_SINGLEBLOCK ++ case PTRACE_SINGLEBLOCK: ++ if (unlikely(!arch_has_block_step())) ++ return -EIO; ++ action = UTRACE_BLOCKSTEP; ++ events |= UTRACE_EVENT(SYSCALL_EXIT); ++ break; ++#endif ++ ++#ifdef PTRACE_SINGLESTEP ++ case PTRACE_SINGLESTEP: ++ if (unlikely(!arch_has_single_step())) ++ return -EIO; ++ action = UTRACE_SINGLESTEP; ++ events |= UTRACE_EVENT(SYSCALL_EXIT); ++ break; ++#endif ++ ++#ifdef PTRACE_SYSEMU ++ case PTRACE_SYSEMU_SINGLESTEP: ++ if (unlikely(!arch_has_single_step())) ++ return -EIO; ++ action = UTRACE_SINGLESTEP; ++ case PTRACE_SYSEMU: ++ ctx->options |= PTRACE_O_SYSEMU; ++ events |= UTRACE_EVENT(SYSCALL_ENTRY); ++ break; ++#endif ++ ++ case PTRACE_SYSCALL: ++ events |= UTRACE_EVENT_SYSCALL; ++ break; ++ ++ case PTRACE_CONT: ++ break; ++ default: ++ return -EIO; ++ } ++ ++ if (events != engine->flags && ++ utrace_set_events(tracee, engine, events)) ++ return -ESRCH; ++ ++ return action; ++} ++ ++static int ptrace_resume(struct task_struct *tracee, ++ struct utrace_engine *engine, ++ long request, long data) ++{ ++ struct ptrace_context *ctx = ptrace_context(engine); ++ int action; ++ ++ if (!valid_signal(data)) ++ return -EIO; ++ ++ action = ptrace_resume_action(tracee, engine, 
request); ++ if (action < 0) ++ return action; ++ ++ switch (get_stop_event(ctx)) { ++ case PTRACE_EVENT_VFORK: ++ if (ctx->options & PTRACE_O_TRACEVFORKDONE) { ++ set_stop_code(ctx, PTRACE_EVENT_VFORK_DONE); ++ action = UTRACE_REPORT; ++ } ++ break; ++ ++ case PTRACE_EVENT_EXEC: ++ case PTRACE_EVENT_FORK: ++ case PTRACE_EVENT_CLONE: ++ case PTRACE_EVENT_VFORK_DONE: ++ if (request == PTRACE_SYSCALL) { ++ set_syscall_code(ctx); ++ do_ptrace_notify_stop(ctx, tracee); ++ return 0; ++ } ++ ++ if (action != UTRACE_RESUME) { ++ /* ++ * single-stepping. UTRACE_SIGNAL_REPORT will ++ * synthesize a trap to follow the syscall insn. ++ */ ++ ctx->signr = SIGTRAP; ++ action = UTRACE_INTERRUPT; ++ } ++ break; ++ ++ case PTRACE_EVENT_SYSCALL: ++ if (data) ++ send_sig_info(data, SEND_SIG_PRIV, tracee); ++ break; ++ ++ case PTRACE_EVENT_SIGNAL: ++ ctx->signr = data; ++ break; ++ } ++ ++ ctx->resume = action; ++ ptrace_wake_up(tracee, engine, action, true); ++ return 0; ++} ++ ++extern int ptrace_regset(struct task_struct *task, int req, unsigned int type, ++ struct iovec *kiov); ++ ++int ptrace_request(struct task_struct *child, long request, ++ long addr, long data) ++{ ++ struct utrace_engine *engine = ptrace_lookup_engine(child); ++ siginfo_t siginfo; ++ int ret; ++ ++ if (unlikely(IS_ERR(engine))) ++ return -ESRCH; ++ ++ switch (request) { ++ case PTRACE_PEEKTEXT: ++ case PTRACE_PEEKDATA: ++ ret = generic_ptrace_peekdata(child, addr, data); ++ break; ++ case PTRACE_POKETEXT: ++ case PTRACE_POKEDATA: ++ ret = generic_ptrace_pokedata(child, addr, data); ++ break; ++ ++#ifdef PTRACE_OLDSETOPTIONS ++ case PTRACE_OLDSETOPTIONS: ++#endif ++ case PTRACE_SETOPTIONS: ++ ret = ptrace_set_options(child, engine, data); ++ break; ++ case PTRACE_GETEVENTMSG: ++ ret = put_user(ptrace_context(engine)->eventmsg, ++ (unsigned long __user *) data); ++ break; ++ ++ case PTRACE_GETSIGINFO: ++ ret = ptrace_rw_siginfo(child, ptrace_context(engine), ++ &siginfo, false); ++ if (!ret) ++ ret = 
copy_siginfo_to_user((siginfo_t __user *) data, ++ &siginfo); ++ break; ++ ++ case PTRACE_SETSIGINFO: ++ if (copy_from_user(&siginfo, (siginfo_t __user *) data, ++ sizeof siginfo)) ++ ret = -EFAULT; ++ else ++ ret = ptrace_rw_siginfo(child, ptrace_context(engine), ++ &siginfo, true); ++ break; ++ ++ case PTRACE_DETACH: /* detach a process that was attached. */ ++ ret = ptrace_detach(child, data); ++ break; ++ ++ case PTRACE_KILL: ++ /* Ugly historical behaviour. */ ++ if (task_is_traced(child)) ++ ptrace_resume(child, engine, PTRACE_CONT, SIGKILL); ++ ret = 0; ++ break; ++ ++ case PTRACE_GETREGSET: ++ case PTRACE_SETREGSET: ++ { ++ struct iovec kiov; ++ struct iovec __user *uiov = (struct iovec __user *) data; ++ ++ if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) ++ return -EFAULT; ++ ++ if (__get_user(kiov.iov_base, &uiov->iov_base) || ++ __get_user(kiov.iov_len, &uiov->iov_len)) ++ return -EFAULT; ++ ++ ret = ptrace_regset(child, request, addr, &kiov); ++ if (!ret) ++ ret = __put_user(kiov.iov_len, &uiov->iov_len); ++ break; ++ } ++ ++ default: ++ ret = ptrace_resume(child, engine, request, data); ++ break; ++ } ++ ++ utrace_engine_put(engine); ++ return ret; ++} ++ ++#if defined CONFIG_COMPAT ++#include ++ ++int compat_ptrace_request(struct task_struct *child, compat_long_t request, ++ compat_ulong_t addr, compat_ulong_t data) ++{ ++ struct utrace_engine *engine = ptrace_lookup_engine(child); ++ compat_ulong_t __user *datap = compat_ptr(data); ++ compat_ulong_t word; ++ siginfo_t siginfo; ++ int ret; ++ ++ if (unlikely(IS_ERR(engine))) ++ return -ESRCH; ++ ++ switch (request) { ++ case PTRACE_PEEKTEXT: ++ case PTRACE_PEEKDATA: ++ ret = access_process_vm(child, addr, &word, sizeof(word), 0); ++ if (ret != sizeof(word)) ++ ret = -EIO; ++ else ++ ret = put_user(word, datap); ++ break; ++ ++ case PTRACE_POKETEXT: ++ case PTRACE_POKEDATA: ++ ret = access_process_vm(child, addr, &data, sizeof(data), 1); ++ ret = (ret != sizeof(data) ? 
-EIO : 0); ++ break; ++ ++ case PTRACE_GETEVENTMSG: ++ ret = put_user((compat_ulong_t)ptrace_context(engine)->eventmsg, ++ datap); ++ break; ++ ++ case PTRACE_GETSIGINFO: ++ ret = ptrace_rw_siginfo(child, ptrace_context(engine), ++ &siginfo, false); ++ if (!ret) ++ ret = copy_siginfo_to_user32( ++ (struct compat_siginfo __user *) datap, ++ &siginfo); ++ break; ++ ++ case PTRACE_SETSIGINFO: ++ memset(&siginfo, 0, sizeof siginfo); ++ if (copy_siginfo_from_user32( ++ &siginfo, (struct compat_siginfo __user *) datap)) ++ ret = -EFAULT; ++ else ++ ret = ptrace_rw_siginfo(child, ptrace_context(engine), ++ &siginfo, true); ++ break; ++ ++ case PTRACE_GETREGSET: ++ case PTRACE_SETREGSET: ++ { ++ struct iovec kiov; ++ struct compat_iovec __user *uiov = ++ (struct compat_iovec __user *) datap; ++ compat_uptr_t ptr; ++ compat_size_t len; ++ ++ if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) ++ return -EFAULT; ++ ++ if (__get_user(ptr, &uiov->iov_base) || ++ __get_user(len, &uiov->iov_len)) ++ return -EFAULT; ++ ++ kiov.iov_base = compat_ptr(ptr); ++ kiov.iov_len = len; ++ ++ ret = ptrace_regset(child, request, addr, &kiov); ++ if (!ret) ++ ret = __put_user(kiov.iov_len, &uiov->iov_len); ++ break; ++ } ++ ++ default: ++ ret = ptrace_request(child, request, addr, data); ++ } ++ ++ utrace_engine_put(engine); ++ return ret; ++} ++#endif /* CONFIG_COMPAT */ +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 0ad4dc0..448b353 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -16,7 +16,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -25,7 +24,327 @@ + #include + #include + ++int __ptrace_may_access(struct task_struct *task, unsigned int mode) ++{ ++ const struct cred *cred = current_cred(), *tcred; ++ ++ /* May we inspect the given task? ++ * This check is used both for attaching with ptrace ++ * and for allowing access to sensitive information in /proc. 
++ * ++ * ptrace_attach denies several cases that /proc allows ++ * because setting up the necessary parent/child relationship ++ * or halting the specified task is impossible. ++ */ ++ int dumpable = 0; ++ /* Don't let security modules deny introspection */ ++ if (task == current) ++ return 0; ++ rcu_read_lock(); ++ tcred = __task_cred(task); ++ if ((cred->uid != tcred->euid || ++ cred->uid != tcred->suid || ++ cred->uid != tcred->uid || ++ cred->gid != tcred->egid || ++ cred->gid != tcred->sgid || ++ cred->gid != tcred->gid) && ++ !capable(CAP_SYS_PTRACE)) { ++ rcu_read_unlock(); ++ return -EPERM; ++ } ++ rcu_read_unlock(); ++ smp_rmb(); ++ if (task->mm) ++ dumpable = get_dumpable(task->mm); ++ if (!dumpable && !capable(CAP_SYS_PTRACE)) ++ return -EPERM; ++ ++ return security_ptrace_access_check(task, mode); ++} ++ ++bool ptrace_may_access(struct task_struct *task, unsigned int mode) ++{ ++ int err; ++ task_lock(task); ++ err = __ptrace_may_access(task, mode); ++ task_unlock(task); ++ return !err; ++} ++ ++/* ++ * Called with irqs disabled, returns true if childs should reap themselves. ++ */ ++static int ignoring_children(struct sighand_struct *sigh) ++{ ++ int ret; ++ spin_lock(&sigh->siglock); ++ ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) || ++ (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT); ++ spin_unlock(&sigh->siglock); ++ return ret; ++} ++ ++/* ++ * Called with tasklist_lock held for writing. ++ * Unlink a traced task, and clean it up if it was a traced zombie. ++ * Return true if it needs to be reaped with release_task(). ++ * (We can't call release_task() here because we already hold tasklist_lock.) ++ * ++ * If it's a zombie, our attachedness prevented normal parent notification ++ * or self-reaping. Do notification now if it would have happened earlier. ++ * If it should reap itself, return true. ++ * ++ * If it's our own child, there is no notification to do. 
But if our normal ++ * children self-reap, then this child was prevented by ptrace and we must ++ * reap it now, in that case we must also wake up sub-threads sleeping in ++ * do_wait(). ++ */ ++bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) ++{ ++ __ptrace_unlink(p); ++ ++ if (p->exit_state == EXIT_ZOMBIE) { ++ if (!task_detached(p) && thread_group_empty(p)) { ++ if (!same_thread_group(p->real_parent, tracer)) ++ do_notify_parent(p, p->exit_signal); ++ else if (ignoring_children(tracer->sighand)) { ++ __wake_up_parent(p, tracer); ++ p->exit_signal = -1; ++ } ++ } ++ if (task_detached(p)) { ++ /* Mark it as in the process of being reaped. */ ++ p->exit_state = EXIT_DEAD; ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) ++{ ++ int copied = 0; ++ ++ while (len > 0) { ++ char buf[128]; ++ int this_len, retval; ++ ++ this_len = (len > sizeof(buf)) ? sizeof(buf) : len; ++ retval = access_process_vm(tsk, src, buf, this_len, 0); ++ if (!retval) { ++ if (copied) ++ break; ++ return -EIO; ++ } ++ if (copy_to_user(dst, buf, retval)) ++ return -EFAULT; ++ copied += retval; ++ src += retval; ++ dst += retval; ++ len -= retval; ++ } ++ return copied; ++} ++ ++int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len) ++{ ++ int copied = 0; ++ ++ while (len > 0) { ++ char buf[128]; ++ int this_len, retval; ++ ++ this_len = (len > sizeof(buf)) ? 
sizeof(buf) : len; ++ if (copy_from_user(buf, src, this_len)) ++ return -EFAULT; ++ retval = access_process_vm(tsk, dst, buf, this_len, 1); ++ if (!retval) { ++ if (copied) ++ break; ++ return -EIO; ++ } ++ copied += retval; ++ src += retval; ++ dst += retval; ++ len -= retval; ++ } ++ return copied; ++} ++ ++#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++ ++static const struct user_regset * ++find_regset(const struct user_regset_view *view, unsigned int type) ++{ ++ const struct user_regset *regset; ++ int n; ++ ++ for (n = 0; n < view->n; ++n) { ++ regset = view->regsets + n; ++ if (regset->core_note_type == type) ++ return regset; ++ } ++ ++ return NULL; ++} ++ ++int ptrace_regset(struct task_struct *task, int req, unsigned int type, ++ struct iovec *kiov) ++{ ++ const struct user_regset_view *view = task_user_regset_view(task); ++ const struct user_regset *regset = find_regset(view, type); ++ int regset_no; ++ ++ if (!regset || (kiov->iov_len % regset->size) != 0) ++ return -EINVAL; ++ ++ regset_no = regset - view->regsets; ++ kiov->iov_len = min(kiov->iov_len, ++ (__kernel_size_t) (regset->n * regset->size)); ++ ++ if (req == PTRACE_GETREGSET) ++ return copy_regset_to_user(task, view, regset_no, 0, ++ kiov->iov_len, kiov->iov_base); ++ else ++ return copy_regset_from_user(task, view, regset_no, 0, ++ kiov->iov_len, kiov->iov_base); ++} ++ ++#endif ++ ++static struct task_struct *ptrace_get_task_struct(pid_t pid) ++{ ++ struct task_struct *child; ++ ++ rcu_read_lock(); ++ child = find_task_by_vpid(pid); ++ if (child) ++ get_task_struct(child); ++ rcu_read_unlock(); ++ ++ if (!child) ++ return ERR_PTR(-ESRCH); ++ return child; ++} ++ ++#ifndef arch_ptrace_attach ++#define arch_ptrace_attach(child) do { } while (0) ++#endif ++ ++SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) ++{ ++ struct task_struct *child; ++ long ret; ++ ++ /* ++ * This lock_kernel fixes a subtle race with suid exec ++ */ ++ lock_kernel(); ++ if (request == PTRACE_TRACEME) { ++ 
ret = ptrace_traceme(); ++ if (!ret) ++ arch_ptrace_attach(current); ++ goto out; ++ } ++ ++ child = ptrace_get_task_struct(pid); ++ if (IS_ERR(child)) { ++ ret = PTR_ERR(child); ++ goto out; ++ } ++ ++ if (request == PTRACE_ATTACH) { ++ ret = ptrace_attach(child); ++ /* ++ * Some architectures need to do book-keeping after ++ * a ptrace attach. ++ */ ++ if (!ret) ++ arch_ptrace_attach(child); ++ goto out_put_task_struct; ++ } ++ ++ ret = ptrace_check_attach(child, request == PTRACE_KILL); ++ if (ret < 0) ++ goto out_put_task_struct; ++ ++ ret = arch_ptrace(child, request, addr, data); ++ ++ out_put_task_struct: ++ put_task_struct(child); ++ out: ++ unlock_kernel(); ++ return ret; ++} + ++int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data) ++{ ++ unsigned long tmp; ++ int copied; ++ ++ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); ++ if (copied != sizeof(tmp)) ++ return -EIO; ++ return put_user(tmp, (unsigned long __user *)data); ++} ++ ++int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data) ++{ ++ int copied; ++ ++ copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); ++ return (copied == sizeof(data)) ? 0 : -EIO; ++} ++ ++#if defined CONFIG_COMPAT ++#include ++ ++asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, ++ compat_long_t addr, compat_long_t data) ++{ ++ struct task_struct *child; ++ long ret; ++ ++ /* ++ * This lock_kernel fixes a subtle race with suid exec ++ */ ++ lock_kernel(); ++ if (request == PTRACE_TRACEME) { ++ ret = ptrace_traceme(); ++ goto out; ++ } ++ ++ child = ptrace_get_task_struct(pid); ++ if (IS_ERR(child)) { ++ ret = PTR_ERR(child); ++ goto out; ++ } ++ ++ if (request == PTRACE_ATTACH) { ++ ret = ptrace_attach(child); ++ /* ++ * Some architectures need to do book-keeping after ++ * a ptrace attach. 
++ */ ++ if (!ret) ++ arch_ptrace_attach(child); ++ goto out_put_task_struct; ++ } ++ ++ ret = ptrace_check_attach(child, request == PTRACE_KILL); ++ if (!ret) ++ ret = compat_arch_ptrace(child, request, addr, data); ++ ++ out_put_task_struct: ++ put_task_struct(child); ++ out: ++ unlock_kernel(); ++ return ret; ++} ++#endif /* CONFIG_COMPAT */ ++ ++#ifndef CONFIG_UTRACE + /* + * ptrace a task: make the debugger its new parent and + * move it to the ptrace list. +@@ -119,61 +438,6 @@ int ptrace_check_attach(struct task_stru + return ret; + } + +-int __ptrace_may_access(struct task_struct *task, unsigned int mode) +-{ +- const struct cred *cred = current_cred(), *tcred; +- +- /* May we inspect the given task? +- * This check is used both for attaching with ptrace +- * and for allowing access to sensitive information in /proc. +- * +- * ptrace_attach denies several cases that /proc allows +- * because setting up the necessary parent/child relationship +- * or halting the specified task is impossible. +- */ +- int dumpable = 0; +- /* Don't let security modules deny introspection */ +- if (task == current) +- return 0; +- rcu_read_lock(); +- tcred = __task_cred(task); +- if ((cred->uid != tcred->euid || +- cred->uid != tcred->suid || +- cred->uid != tcred->uid || +- cred->gid != tcred->egid || +- cred->gid != tcred->sgid || +- cred->gid != tcred->gid) && +- !capable(CAP_SYS_PTRACE)) { +- rcu_read_unlock(); +- return -EPERM; +- } +- rcu_read_unlock(); +- smp_rmb(); +- if (task->mm) +- dumpable = get_dumpable(task->mm); +- if (!dumpable && !capable(CAP_SYS_PTRACE)) +- return -EPERM; +- +- return security_ptrace_access_check(task, mode); +-} +- +-bool ptrace_may_access(struct task_struct *task, unsigned int mode) +-{ +- int err; +- task_lock(task); +- err = __ptrace_may_access(task, mode); +- task_unlock(task); +- return !err; +-} +- +-/* +- * For experimental use of utrace, exclude ptrace on the same task. 
+- */ +-static inline bool exclude_ptrace(struct task_struct *task) +-{ +- return unlikely(!!task_utrace_flags(task)); +-} +- + int ptrace_attach(struct task_struct *task) + { + int retval; +@@ -197,8 +461,6 @@ int ptrace_attach(struct task_struct *ta + + task_lock(task); + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); +- if (!retval && exclude_ptrace(task)) +- retval = -EBUSY; + task_unlock(task); + if (retval) + goto unlock_creds; +@@ -226,87 +488,33 @@ out: + return retval; + } + +-/** +- * ptrace_traceme -- helper for PTRACE_TRACEME +- * +- * Performs checks and sets PT_PTRACED. +- * Should be used by all ptrace implementations for PTRACE_TRACEME. +- */ +-int ptrace_traceme(void) +-{ +- int ret = -EPERM; +- +- if (exclude_ptrace(current)) /* XXX locking */ +- return -EBUSY; +- +- write_lock_irq(&tasklist_lock); +- /* Are we already being traced? */ +- if (!current->ptrace) { +- ret = security_ptrace_traceme(current->parent); +- /* +- * Check PF_EXITING to ensure ->real_parent has not passed +- * exit_ptrace(). Otherwise we don't report the error but +- * pretend ->real_parent untraces us right after return. +- */ +- if (!ret && !(current->real_parent->flags & PF_EXITING)) { +- current->ptrace = PT_PTRACED; +- __ptrace_link(current, current->real_parent); +- } +- } +- write_unlock_irq(&tasklist_lock); +- +- return ret; +-} +- +-/* +- * Called with irqs disabled, returns true if childs should reap themselves. +- */ +-static int ignoring_children(struct sighand_struct *sigh) +-{ +- int ret; +- spin_lock(&sigh->siglock); +- ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) || +- (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT); +- spin_unlock(&sigh->siglock); +- return ret; +-} +- +-/* +- * Called with tasklist_lock held for writing. +- * Unlink a traced task, and clean it up if it was a traced zombie. +- * Return true if it needs to be reaped with release_task(). +- * (We can't call release_task() here because we already hold tasklist_lock.) 
+- * +- * If it's a zombie, our attachedness prevented normal parent notification +- * or self-reaping. Do notification now if it would have happened earlier. +- * If it should reap itself, return true. +- * +- * If it's our own child, there is no notification to do. But if our normal +- * children self-reap, then this child was prevented by ptrace and we must +- * reap it now, in that case we must also wake up sub-threads sleeping in +- * do_wait(). ++/** ++ * ptrace_traceme -- helper for PTRACE_TRACEME ++ * ++ * Performs checks and sets PT_PTRACED. ++ * Should be used by all ptrace implementations for PTRACE_TRACEME. + */ +-bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) ++int ptrace_traceme(void) + { +- __ptrace_unlink(p); ++ int ret = -EPERM; + +- if (p->exit_state == EXIT_ZOMBIE) { +- if (!task_detached(p) && thread_group_empty(p)) { +- if (!same_thread_group(p->real_parent, tracer)) +- do_notify_parent(p, p->exit_signal); +- else if (ignoring_children(tracer->sighand)) { +- __wake_up_parent(p, tracer); +- p->exit_signal = -1; +- } +- } +- if (task_detached(p)) { +- /* Mark it as in the process of being reaped. */ +- p->exit_state = EXIT_DEAD; +- return true; ++ write_lock_irq(&tasklist_lock); ++ /* Are we already being traced? */ ++ if (!current->ptrace) { ++ ret = security_ptrace_traceme(current->parent); ++ /* ++ * Check PF_EXITING to ensure ->real_parent has not passed ++ * exit_ptrace(). Otherwise we don't report the error but ++ * pretend ->real_parent untraces us right after return. 
++ */ ++ if (!ret && !(current->real_parent->flags & PF_EXITING)) { ++ current->ptrace = PT_PTRACED; ++ __ptrace_link(current, current->real_parent); + } + } ++ write_unlock_irq(&tasklist_lock); + +- return false; ++ return ret; + } + + int ptrace_detach(struct task_struct *child, unsigned int data) +@@ -362,56 +570,6 @@ void exit_ptrace(struct task_struct *tra + } + } + +-int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) +-{ +- int copied = 0; +- +- while (len > 0) { +- char buf[128]; +- int this_len, retval; +- +- this_len = (len > sizeof(buf)) ? sizeof(buf) : len; +- retval = access_process_vm(tsk, src, buf, this_len, 0); +- if (!retval) { +- if (copied) +- break; +- return -EIO; +- } +- if (copy_to_user(dst, buf, retval)) +- return -EFAULT; +- copied += retval; +- src += retval; +- dst += retval; +- len -= retval; +- } +- return copied; +-} +- +-int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len) +-{ +- int copied = 0; +- +- while (len > 0) { +- char buf[128]; +- int this_len, retval; +- +- this_len = (len > sizeof(buf)) ? 
sizeof(buf) : len; +- if (copy_from_user(buf, src, this_len)) +- return -EFAULT; +- retval = access_process_vm(tsk, dst, buf, this_len, 1); +- if (!retval) { +- if (copied) +- break; +- return -EIO; +- } +- copied += retval; +- src += retval; +- dst += retval; +- len -= retval; +- } +- return copied; +-} +- + static int ptrace_setoptions(struct task_struct *child, long data) + { + child->ptrace &= ~PT_TRACE_MASK; +@@ -526,47 +683,6 @@ static int ptrace_resume(struct task_str + return 0; + } + +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK +- +-static const struct user_regset * +-find_regset(const struct user_regset_view *view, unsigned int type) +-{ +- const struct user_regset *regset; +- int n; +- +- for (n = 0; n < view->n; ++n) { +- regset = view->regsets + n; +- if (regset->core_note_type == type) +- return regset; +- } +- +- return NULL; +-} +- +-static int ptrace_regset(struct task_struct *task, int req, unsigned int type, +- struct iovec *kiov) +-{ +- const struct user_regset_view *view = task_user_regset_view(task); +- const struct user_regset *regset = find_regset(view, type); +- int regset_no; +- +- if (!regset || (kiov->iov_len % regset->size) != 0) +- return -EINVAL; +- +- regset_no = regset - view->regsets; +- kiov->iov_len = min(kiov->iov_len, +- (__kernel_size_t) (regset->n * regset->size)); +- +- if (req == PTRACE_GETREGSET) +- return copy_regset_to_user(task, view, regset_no, 0, +- kiov->iov_len, kiov->iov_base); +- else +- return copy_regset_from_user(task, view, regset_no, 0, +- kiov->iov_len, kiov->iov_base); +-} +- +-#endif +- + int ptrace_request(struct task_struct *child, long request, + long addr, long data) + { +@@ -656,93 +772,7 @@ int ptrace_request(struct task_struct *c + return ret; + } + +-static struct task_struct *ptrace_get_task_struct(pid_t pid) +-{ +- struct task_struct *child; +- +- rcu_read_lock(); +- child = find_task_by_vpid(pid); +- if (child) +- get_task_struct(child); +- rcu_read_unlock(); +- +- if (!child) +- return ERR_PTR(-ESRCH); 
+- return child; +-} +- +-#ifndef arch_ptrace_attach +-#define arch_ptrace_attach(child) do { } while (0) +-#endif +- +-SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) +-{ +- struct task_struct *child; +- long ret; +- +- /* +- * This lock_kernel fixes a subtle race with suid exec +- */ +- lock_kernel(); +- if (request == PTRACE_TRACEME) { +- ret = ptrace_traceme(); +- if (!ret) +- arch_ptrace_attach(current); +- goto out; +- } +- +- child = ptrace_get_task_struct(pid); +- if (IS_ERR(child)) { +- ret = PTR_ERR(child); +- goto out; +- } +- +- if (request == PTRACE_ATTACH) { +- ret = ptrace_attach(child); +- /* +- * Some architectures need to do book-keeping after +- * a ptrace attach. +- */ +- if (!ret) +- arch_ptrace_attach(child); +- goto out_put_task_struct; +- } +- +- ret = ptrace_check_attach(child, request == PTRACE_KILL); +- if (ret < 0) +- goto out_put_task_struct; +- +- ret = arch_ptrace(child, request, addr, data); +- +- out_put_task_struct: +- put_task_struct(child); +- out: +- unlock_kernel(); +- return ret; +-} +- +-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data) +-{ +- unsigned long tmp; +- int copied; +- +- copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); +- if (copied != sizeof(tmp)) +- return -EIO; +- return put_user(tmp, (unsigned long __user *)data); +-} +- +-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data) +-{ +- int copied; +- +- copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); +- return (copied == sizeof(data)) ? 
0 : -EIO; +-} +- + #if defined CONFIG_COMPAT +-#include +- + int compat_ptrace_request(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data) + { +@@ -820,47 +850,5 @@ int compat_ptrace_request(struct task_st + + return ret; + } +- +-asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, +- compat_long_t addr, compat_long_t data) +-{ +- struct task_struct *child; +- long ret; +- +- /* +- * This lock_kernel fixes a subtle race with suid exec +- */ +- lock_kernel(); +- if (request == PTRACE_TRACEME) { +- ret = ptrace_traceme(); +- goto out; +- } +- +- child = ptrace_get_task_struct(pid); +- if (IS_ERR(child)) { +- ret = PTR_ERR(child); +- goto out; +- } +- +- if (request == PTRACE_ATTACH) { +- ret = ptrace_attach(child); +- /* +- * Some architectures need to do book-keeping after +- * a ptrace attach. +- */ +- if (!ret) +- arch_ptrace_attach(child); +- goto out_put_task_struct; +- } +- +- ret = ptrace_check_attach(child, request == PTRACE_KILL); +- if (!ret) +- ret = compat_arch_ptrace(child, request, addr, data); +- +- out_put_task_struct: +- put_task_struct(child); +- out: +- unlock_kernel(); +- return ret; +-} + #endif /* CONFIG_COMPAT */ ++#endif /* CONFIG_UTRACE */ +diff --git a/kernel/utrace.c b/kernel/utrace.c +index f003e34..f5a9e2c 100644 +--- a/kernel/utrace.c ++++ b/kernel/utrace.c +@@ -811,6 +811,22 @@ relock: + spin_unlock_irq(&task->sighand->siglock); + spin_unlock(&utrace->lock); + ++ /* ++ * If ptrace is among the reasons for this stop, do its ++ * notification now. This could not just be done in ++ * ptrace's own event report callbacks because it has to ++ * be done after we are in TASK_TRACED. This makes the ++ * synchronization with ptrace_do_wait() work right. ++ * ++ * It's only because of the bad old overloading of the do_wait() ++ * logic for handling ptrace stops that we need this special case ++ * here. One day we will clean up ptrace so it does not need to ++ * work this way. 
New things that are designed sensibly don't need ++ * a wakeup that synchronizes with tasklist_lock and ->state, so ++ * the proper utrace API does not try to support this weirdness. ++ */ ++ ptrace_notify_stop(task); ++ + schedule(); + + utrace_finish_stop(); diff --git a/linux-2.6-utrace.patch b/linux-2.6-utrace.patch new file mode 100644 index 000000000..c95ffdb2d --- /dev/null +++ b/linux-2.6-utrace.patch @@ -0,0 +1,4163 @@ +utrace core + +This adds the utrace facility, a new modular interface in the kernel for +implementing user thread tracing and debugging. This fits on top of the +tracehook_* layer, so the new code is well-isolated. + +The new interface is in and the DocBook utrace book +describes it. It allows for multiple separate tracing engines to work in +parallel without interfering with each other. Higher-level tracing +facilities can be implemented as loadable kernel modules using this layer. + +The new facility is made optional under CONFIG_UTRACE. +When this is not enabled, no new code is added. +It can only be enabled on machines that have all the +prerequisites and select CONFIG_HAVE_ARCH_TRACEHOOK. + +In this initial version, utrace and ptrace do not play together at all. +If ptrace is attached to a thread, the attach calls in the utrace kernel +API return -EBUSY. If utrace is attached to a thread, the PTRACE_ATTACH +or PTRACE_TRACEME request will return EBUSY to userland. The old ptrace +code is otherwise unchanged and nothing using ptrace should be affected +by this patch as long as utrace is not used at the same time. In the +future we can clean up the ptrace implementation and rework it to use +the utrace API. 
+ +Signed-off-by: Roland McGrath +--- + Documentation/DocBook/Makefile | 2 +- + Documentation/DocBook/utrace.tmpl | 590 +++++++++ + fs/proc/array.c | 3 + + include/linux/sched.h | 5 + + include/linux/tracehook.h | 87 ++- + include/linux/utrace.h | 692 +++++++++++ + init/Kconfig | 9 + + kernel/Makefile | 1 + + kernel/fork.c | 3 + + kernel/ptrace.c | 14 + + kernel/utrace.c | 2436 +++++++++++++++++++++++++++++++++++++ + 11 files changed, 3840 insertions(+), 2 deletions(-) + +diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile +index 325cfd1..a45e4e6 100644 +--- a/Documentation/DocBook/Makefile ++++ b/Documentation/DocBook/Makefile +@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml de + genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ + mac80211.xml debugobjects.xml sh.xml regulator.xml \ + alsa-driver-api.xml writing-an-alsa-driver.xml \ +- tracepoint.xml media.xml ++ tracepoint.xml utrace.xml media.xml + + ### + # The build process is as follows (targets): +diff --git a/Documentation/DocBook/utrace.tmpl b/Documentation/DocBook/utrace.tmpl +new file mode 100644 +index ...e149f49 100644 +--- /dev/null ++++ b/Documentation/DocBook/utrace.tmpl +@@ -0,0 +1,590 @@ ++ ++ ++ ++ ++ ++ The utrace User Debugging Infrastructure ++ ++ ++ ++ ++ utrace concepts ++ ++ Introduction ++ ++ ++ utrace is infrastructure code for tracing ++ and controlling user threads. This is the foundation for writing ++ tracing engines, which can be loadable kernel modules. ++ ++ ++ ++ The basic actors in utrace are the thread ++ and the tracing engine. A tracing engine is some body of code that ++ calls into the <linux/utrace.h> ++ interfaces, represented by a struct ++ utrace_engine_ops. (Usually it's a kernel module, ++ though the legacy ptrace support is a tracing ++ engine that is not in a kernel module.) The interface operates on ++ individual threads (struct task_struct). 
++ If an engine wants to treat several threads as a group, that is up ++ to its higher-level code. ++ ++ ++ ++ Tracing begins by attaching an engine to a thread, using ++ utrace_attach_task or ++ utrace_attach_pid. If successful, it returns a ++ pointer that is the handle used in all other calls. ++ ++ ++ ++ ++ Events and Callbacks ++ ++ ++ An attached engine does nothing by default. An engine makes something ++ happen by requesting callbacks via utrace_set_events ++ and poking the thread with utrace_control. ++ The synchronization issues related to these two calls ++ are discussed further below in . ++ ++ ++ ++ Events are specified using the macro ++ UTRACE_EVENT(type). ++ Each event type is associated with a callback in struct ++ utrace_engine_ops. A tracing engine can leave unused ++ callbacks NULL. The only callbacks required ++ are those used by the event flags it sets. ++ ++ ++ ++ Many engines can be attached to each thread. When a thread has an ++ event, each engine gets a callback if it has set the event flag for ++ that event type. For most events, engines are called in the order they ++ attached. Engines that attach after the event has occurred do not get ++ callbacks for that event. This includes any new engines just attached ++ by an existing engine's callback function. Once the sequence of ++ callbacks for that one event has completed, such new engines are then ++ eligible in the next sequence that starts when there is another event. ++ ++ ++ ++ Event reporting callbacks have details particular to the event type, ++ but are all called in similar environments and have the same ++ constraints. Callbacks are made from safe points, where no locks ++ are held, no special resources are pinned (usually), and the ++ user-mode state of the thread is accessible. So, callback code has ++ a pretty free hand. But to be a good citizen, callback code should ++ never block for long periods. 
It is fine to block in ++ kmalloc and the like, but never wait for i/o or ++ for user mode to do something. If you need the thread to wait, use ++ UTRACE_STOP and return from the callback ++ quickly. When your i/o finishes or whatever, you can use ++ utrace_control to resume the thread. ++ ++ ++ ++ The UTRACE_EVENT(SYSCALL_ENTRY) event is a special ++ case. While other events happen in the kernel when it will return to ++ user mode soon, this event happens when entering the kernel before it ++ will proceed with the work requested from user mode. Because of this ++ difference, the report_syscall_entry callback is ++ special in two ways. For this event, engines are called in reverse of ++ the normal order (this includes the report_quiesce ++ call that precedes a report_syscall_entry call). ++ This preserves the semantics that the last engine to attach is called ++ "closest to user mode"--the engine that is first to see a thread's user ++ state when it enters the kernel is also the last to see that state when ++ the thread returns to user mode. For the same reason, if these ++ callbacks use UTRACE_STOP (see the next section), ++ the thread stops immediately after callbacks rather than only when it's ++ ready to return to user mode; when allowed to resume, it will actually ++ attempt the system call indicated by the register values at that time. ++ ++ ++ ++ ++ Stopping Safely ++ ++ Writing well-behaved callbacks ++ ++ ++ Well-behaved callbacks are important to maintain two essential ++ properties of the interface. The first of these is that unrelated ++ tracing engines should not interfere with each other. If your engine's ++ event callback does not return quickly, then another engine won't get ++ the event notification in a timely manner. The second important ++ property is that tracing should be as noninvasive as possible to the ++ normal operation of the system overall and of the traced thread in ++ particular. 
That is, attached tracing engines should not perturb a ++ thread's behavior, except to the extent that changing its user-visible ++ state is explicitly what you want to do. (Obviously some perturbation ++ is unavoidable, primarily timing changes, ranging from small delays due ++ to the overhead of tracing, to arbitrary pauses in user code execution ++ when a user stops a thread with a debugger for examination.) Even when ++ you explicitly want the perturbation of making the traced thread block, ++ just blocking directly in your callback has more unwanted effects. For ++ example, the CLONE event callbacks are called when ++ the new child thread has been created but not yet started running; the ++ child can never be scheduled until the CLONE ++ tracing callbacks return. (This allows engines tracing the parent to ++ attach to the child.) If a CLONE event callback ++ blocks the parent thread, it also prevents the child thread from ++ running (even to process a SIGKILL). If what you ++ want is to make both the parent and child block, then use ++ utrace_attach_task on the child and then use ++ UTRACE_STOP on both threads. A more crucial ++ problem with blocking in callbacks is that it can prevent ++ SIGKILL from working. A thread that is blocking ++ due to UTRACE_STOP will still wake up and die ++ immediately when sent a SIGKILL, as all threads ++ should. Relying on the utrace ++ infrastructure rather than on private synchronization calls in event ++ callbacks is an important way to help keep tracing robustly ++ noninvasive. ++ ++ ++ ++ ++ Using <constant>UTRACE_STOP</constant> ++ ++ ++ To control another thread and access its state, it must be stopped ++ with UTRACE_STOP. This means that it is ++ stopped and won't start running again while we access it. When a ++ thread is not already stopped, utrace_control ++ returns -EINPROGRESS and an engine must wait ++ for an event callback when the thread is ready to stop. 
The thread ++ may be running on another CPU or may be blocked. When it is ready ++ to be examined, it will make callbacks to engines that set the ++ UTRACE_EVENT(QUIESCE) event bit. To wake up an ++ interruptible wait, use UTRACE_INTERRUPT. ++ ++ ++ ++ As long as some engine has used UTRACE_STOP and ++ not called utrace_control to resume the thread, ++ then the thread will remain stopped. SIGKILL ++ will wake it up, but it will not run user code. When the stop is ++ cleared with utrace_control or a callback ++ return value, the thread starts running again. ++ (See also .) ++ ++ ++ ++ ++ ++ ++ Tear-down Races ++ ++ Primacy of <constant>SIGKILL</constant> ++ ++ Ordinarily synchronization issues for tracing engines are kept fairly ++ straightforward by using UTRACE_STOP. You ask a ++ thread to stop, and then once it makes the ++ report_quiesce callback it cannot do anything else ++ that would result in another callback, until you let it with a ++ utrace_control call. This simple arrangement ++ avoids complex and error-prone code in each one of a tracing engine's ++ event callbacks to keep them serialized with the engine's other ++ operations done on that thread from another thread of control. ++ However, giving tracing engines complete power to keep a traced thread ++ stuck in place runs afoul of a more important kind of simplicity that ++ the kernel overall guarantees: nothing can prevent or delay ++ SIGKILL from making a thread die and release its ++ resources. To preserve this important property of ++ SIGKILL, it as a special case can break ++ UTRACE_STOP like nothing else normally can. This ++ includes both explicit SIGKILL signals and the ++ implicit SIGKILL sent to each other thread in the ++ same thread group by a thread doing an exec, or processing a fatal ++ signal, or making an exit_group system call. 
A ++ tracing engine can prevent a thread from beginning the exit or exec or ++ dying by signal (other than SIGKILL) if it is ++ attached to that thread, but once the operation begins, no tracing ++ engine can prevent or delay all other threads in the same thread group ++ dying. ++ ++ ++ ++ Final callbacks ++ ++ The report_reap callback is always the final event ++ in the life cycle of a traced thread. Tracing engines can use this as ++ the trigger to clean up their own data structures. The ++ report_death callback is always the penultimate ++ event a tracing engine might see; it's seen unless the thread was ++ already in the midst of dying when the engine attached. Many tracing ++ engines will have no interest in when a parent reaps a dead process, ++ and nothing they want to do with a zombie thread once it dies; for ++ them, the report_death callback is the natural ++ place to clean up data structures and detach. To facilitate writing ++ such engines robustly, given the asynchrony of ++ SIGKILL, and without error-prone manual ++ implementation of synchronization schemes, the ++ utrace infrastructure provides some special ++ guarantees about the report_death and ++ report_reap callbacks. It still takes some care ++ to be sure your tracing engine is robust to tear-down races, but these ++ rules make it reasonably straightforward and concise to handle a lot of ++ corner cases correctly. ++ ++ ++ ++ Engine and task pointers ++ ++ The first sort of guarantee concerns the core data structures ++ themselves. struct utrace_engine is ++ a reference-counted data structure. While you hold a reference, an ++ engine pointer will always stay valid so that you can safely pass it to ++ any utrace call. Each call to ++ utrace_attach_task or ++ utrace_attach_pid returns an engine pointer with a ++ reference belonging to the caller. You own that reference until you ++ drop it using utrace_engine_put. There is an ++ implicit reference on the engine while it is attached. 
So if you drop ++ your only reference, and then use ++ utrace_attach_task without ++ UTRACE_ATTACH_CREATE to look up that same engine, ++ you will get the same pointer with a new reference to replace the one ++ you dropped, just like calling utrace_engine_get. ++ When an engine has been detached, either explicitly with ++ UTRACE_DETACH or implicitly after ++ report_reap, then any references you hold are all ++ that keep the old engine pointer alive. ++ ++ ++ ++ There is nothing a kernel module can do to keep a struct ++ task_struct alive outside of ++ rcu_read_lock. When the task dies and is reaped ++ by its parent (or itself), that structure can be freed so that any ++ dangling pointers you have stored become invalid. ++ utrace will not prevent this, but it can ++ help you detect it safely. By definition, a task that has been reaped ++ has had all its engines detached. All ++ utrace calls can be safely called on a ++ detached engine if the caller holds a reference on that engine pointer, ++ even if the task pointer passed in the call is invalid. All calls ++ return -ESRCH for a detached engine, which tells ++ you that the task pointer you passed could be invalid now. Since ++ utrace_control and ++ utrace_set_events do not block, you can call those ++ inside a rcu_read_lock section and be sure after ++ they don't return -ESRCH that the task pointer is ++ still valid until rcu_read_unlock. The ++ infrastructure never holds task references of its own. Though neither ++ rcu_read_lock nor any other lock is held while ++ making a callback, it's always guaranteed that the struct ++ task_struct and the struct ++ utrace_engine passed as arguments remain valid ++ until the callback function returns. ++ ++ ++ ++ The common means for safely holding task pointers that is available to ++ kernel modules is to use struct pid, which ++ permits put_pid from kernel modules. 
When using ++ that, the calls utrace_attach_pid, ++ utrace_control_pid, ++ utrace_set_events_pid, and ++ utrace_barrier_pid are available. ++ ++ ++ ++ ++ ++ Serialization of <constant>DEATH</constant> and <constant>REAP</constant> ++ ++ ++ The second guarantee is the serialization of ++ DEATH and REAP event ++ callbacks for a given thread. The actual reaping by the parent ++ (release_task call) can occur simultaneously ++ while the thread is still doing the final steps of dying, including ++ the report_death callback. If a tracing engine ++ has requested both DEATH and ++ REAP event reports, it's guaranteed that the ++ report_reap callback will not be made until ++ after the report_death callback has returned. ++ If the report_death callback itself detaches ++ from the thread, then the report_reap callback ++ will never be made. Thus it is safe for a ++ report_death callback to clean up data ++ structures and detach. ++ ++ ++ ++ Interlock with final callbacks ++ ++ The final sort of guarantee is that a tracing engine will know for sure ++ whether or not the report_death and/or ++ report_reap callbacks will be made for a certain ++ thread. These tear-down races are disambiguated by the error return ++ values of utrace_set_events and ++ utrace_control. Normally ++ utrace_control called with ++ UTRACE_DETACH returns zero, and this means that no ++ more callbacks will be made. If the thread is in the midst of dying, ++ it returns -EALREADY to indicate that the ++ report_death callback may already be in progress; ++ when you get this error, you know that any cleanup your ++ report_death callback does is about to happen or ++ has just happened--note that if the report_death ++ callback does not detach, the engine remains attached until the thread ++ gets reaped. 
If the thread is in the midst of being reaped, ++ utrace_control returns -ESRCH ++ to indicate that the report_reap callback may ++ already be in progress; this means the engine is implicitly detached ++ when the callback completes. This makes it possible for a tracing ++ engine that has decided asynchronously to detach from a thread to ++ safely clean up its data structures, knowing that no ++ report_death or report_reap ++ callback will try to do the same. utrace_detach ++ returns -ESRCH when the struct ++ utrace_engine has already been detached, but is ++ still a valid pointer because of its reference count. A tracing engine ++ can use this to safely synchronize its own independent multiple threads ++ of control with each other and with its event callbacks that detach. ++ ++ ++ ++ In the same vein, utrace_set_events normally ++ returns zero; if the target thread was stopped before the call, then ++ after a successful call, no event callbacks not requested in the new ++ flags will be made. It fails with -EALREADY if ++ you try to clear UTRACE_EVENT(DEATH) when the ++ report_death callback may already have begun, if ++ you try to clear UTRACE_EVENT(REAP) when the ++ report_reap callback may already have begun, or if ++ you try to newly set UTRACE_EVENT(DEATH) or ++ UTRACE_EVENT(QUIESCE) when the target is already ++ dead or dying. Like utrace_control, it returns ++ -ESRCH when the thread has already been detached ++ (including forcible detach on reaping). This lets the tracing engine ++ know for sure which event callbacks it will or won't see after ++ utrace_set_events has returned. By checking for ++ errors, it can know whether to clean up its data structures immediately ++ or to let its callbacks do the work. 
++ ++ ++ ++ Using <function>utrace_barrier</function> ++ ++ When a thread is safely stopped, calling ++ utrace_control with UTRACE_DETACH ++ or calling utrace_set_events to disable some events ++ ensures synchronously that your engine won't get any more of the callbacks ++ that have been disabled (none at all when detaching). But these can also ++ be used while the thread is not stopped, when it might be simultaneously ++ making a callback to your engine. For this situation, these calls return ++ -EINPROGRESS when it's possible a callback is in ++ progress. If you are not prepared to have your old callbacks still run, ++ then you can synchronize to be sure all the old callbacks are finished, ++ using utrace_barrier. This is necessary if the ++ kernel module containing your callback code is going to be unloaded. ++ ++ ++ After using UTRACE_DETACH once, further calls to ++ utrace_control with the same engine pointer will ++ return -ESRCH. In contrast, after getting ++ -EINPROGRESS from ++ utrace_set_events, you can call ++ utrace_set_events again later and if it returns zero ++ then know the old callbacks have finished. ++ ++ ++ Unlike all other calls, utrace_barrier (and ++ utrace_barrier_pid) will accept any engine pointer you ++ hold a reference on, even if UTRACE_DETACH has already ++ been used. After any utrace_control or ++ utrace_set_events call (these do not block), you can ++ call utrace_barrier to block until callbacks have ++ finished. This returns -ESRCH only if the engine is ++ completely detached (finished all callbacks). Otherwise it waits ++ until the thread is definitely not in the midst of a callback to this ++ engine and then returns zero, but can return ++ -ERESTARTSYS if its wait is interrupted. ++ ++ ++ ++ ++ ++ ++ ++utrace core API ++ ++ ++ The utrace API is declared in <linux/utrace.h>. 
++ ++ ++!Iinclude/linux/utrace.h ++!Ekernel/utrace.c ++ ++ ++ ++Machine State ++ ++ ++ The task_current_syscall function can be used on any ++ valid struct task_struct at any time, and does ++ not even require that utrace_attach_task was used at all. ++ ++ ++ ++ The other ways to access the registers and other machine-dependent state of ++ a task can only be used on a task that is at a known safe point. The safe ++ points are all the places where utrace_set_events can ++ request callbacks (except for the DEATH and ++ REAP events). So at any event callback, it is safe to ++ examine current. ++ ++ ++ ++ One task can examine another only after a callback in the target task that ++ returns UTRACE_STOP so that task will not return to user ++ mode after the safe point. This guarantees that the task will not resume ++ until the same engine uses utrace_control, unless the ++ task dies suddenly. To examine safely, one must use a pair of calls to ++ utrace_prepare_examine and ++ utrace_finish_examine surrounding the calls to ++ struct user_regset functions or direct examination ++ of task data structures. utrace_prepare_examine returns ++ an error if the task is not properly stopped, or is dead. After a ++ successful examination, the paired utrace_finish_examine ++ call returns an error if the task ever woke up during the examination. If ++ so, any data gathered may be scrambled and should be discarded. This means ++ there was a spurious wake-up (which should not happen), or a sudden death. ++ ++ ++<structname>struct user_regset</structname> ++ ++ ++ The struct user_regset API ++ is declared in <linux/regset.h>. ++ ++ ++!Finclude/linux/regset.h ++ ++ ++ ++ ++ <filename>System Call Information</filename> ++ ++ ++ This function is declared in <linux/ptrace.h>. ++ ++ ++!Elib/syscall.c ++ ++ ++ ++<filename>System Call Tracing</filename> ++ ++ ++ The arch API for system call information is declared in ++ <asm/syscall.h>. 
++ Each of these calls can be used only at system call entry tracing, ++ or can be used only at system call exit and the subsequent safe points ++ before returning to user mode. ++ At system call entry tracing means either during a ++ report_syscall_entry callback, ++ or any time after that callback has returned UTRACE_STOP. ++ ++ ++!Finclude/asm-generic/syscall.h ++ ++ ++ ++ ++ ++Kernel Internals ++ ++ ++ This chapter covers the interface to the tracing infrastructure ++ from the core of the kernel and the architecture-specific code. ++ This is for maintainers of the kernel and arch code, and not relevant ++ to using the tracing facilities described in preceding chapters. ++ ++ ++Core Calls In ++ ++ ++ These calls are declared in <linux/tracehook.h>. ++ The core kernel calls these functions at various important places. ++ ++ ++!Finclude/linux/tracehook.h ++ ++ ++ ++Architecture Calls Out ++ ++ ++ An arch that has done all these things sets ++ CONFIG_HAVE_ARCH_TRACEHOOK. ++ This is required to enable the utrace code. ++ ++ ++<filename><asm/ptrace.h></filename> ++ ++ ++ An arch defines these in <asm/ptrace.h> ++ if it supports hardware single-step or block-step features. ++ ++ ++!Finclude/linux/ptrace.h arch_has_single_step arch_has_block_step ++!Finclude/linux/ptrace.h user_enable_single_step user_enable_block_step ++!Finclude/linux/ptrace.h user_disable_single_step ++ ++ ++ ++ ++ <filename><asm/syscall.h></filename> ++ ++ ++ An arch provides <asm/syscall.h> that ++ defines these as inlines, or declares them as exported functions. ++ These interfaces are described in . ++ ++ ++ ++ ++ ++ <filename><linux/tracehook.h></filename> ++ ++ ++ An arch must define TIF_NOTIFY_RESUME ++ and TIF_SYSCALL_TRACE ++ in its <asm/thread_info.h>. 
++ The arch code must call the following functions, all declared ++ in <linux/tracehook.h> and ++ described in : ++ ++ ++ ++ tracehook_notify_resume ++ ++ ++ tracehook_report_syscall_entry ++ ++ ++ tracehook_report_syscall_exit ++ ++ ++ tracehook_signal_handler ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/fs/proc/array.c b/fs/proc/array.c +index 885ab55..b4d0b8a 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -81,6 +81,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -192,6 +193,8 @@ static inline void task_state(struct seq + cred->uid, cred->euid, cred->suid, cred->fsuid, + cred->gid, cred->egid, cred->sgid, cred->fsgid); + ++ task_utrace_proc_status(m, p); ++ + task_lock(p); + if (p->files) + fdt = files_fdtable(p->files); +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 82e4494..8461a2d 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1352,6 +1352,11 @@ struct task_struct { + #endif + seccomp_t seccomp; + ++#ifdef CONFIG_UTRACE ++ struct utrace *utrace; ++ unsigned long utrace_flags; ++#endif ++ + /* Thread group tracking */ + u32 parent_exec_id; + u32 self_exec_id; +diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h +index c78b2f4..71fa250 100644 +--- a/include/linux/tracehook.h ++++ b/include/linux/tracehook.h +@@ -49,6 +49,7 @@ + #include + #include + #include ++#include + struct linux_binprm; + + /** +@@ -63,6 +64,8 @@ struct linux_binprm; + */ + static inline int tracehook_expect_breakpoints(struct task_struct *task) + { ++ if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_CORE))) ++ return 1; + return (task_ptrace(task) & PT_PTRACED) != 0; + } + +@@ -111,6 +114,9 @@ static inline void ptrace_report_syscall + static inline __must_check int tracehook_report_syscall_entry( + struct pt_regs *regs) + { ++ if ((task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_ENTRY)) && ++ utrace_report_syscall_entry(regs)) ++ return 1; + ptrace_report_syscall(regs); + 
return 0; + } +@@ -134,6 +140,9 @@ static inline __must_check int tracehook + */ + static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) + { ++ if (task_utrace_flags(current) & UTRACE_EVENT(SYSCALL_EXIT)) ++ utrace_report_syscall_exit(regs); ++ + if (step && (task_ptrace(current) & PT_PTRACED)) { + siginfo_t info; + user_single_step_siginfo(current, regs, &info); +@@ -201,6 +210,8 @@ static inline void tracehook_report_exec + struct linux_binprm *bprm, + struct pt_regs *regs) + { ++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXEC))) ++ utrace_report_exec(fmt, bprm, regs); + if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && + unlikely(task_ptrace(current) & PT_PTRACED)) + send_sig(SIGTRAP, current, 0); +@@ -218,10 +229,37 @@ static inline void tracehook_report_exec + */ + static inline void tracehook_report_exit(long *exit_code) + { ++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(EXIT))) ++ utrace_report_exit(exit_code); + ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code); + } + + /** ++ * tracehook_init_task - task_struct has just been copied ++ * @task: new &struct task_struct just copied from parent ++ * ++ * Called from do_fork() when @task has just been duplicated. ++ * After this, @task will be passed to tracehook_free_task() ++ * even if the rest of its setup fails before it is fully created. ++ */ ++static inline void tracehook_init_task(struct task_struct *task) ++{ ++ utrace_init_task(task); ++} ++ ++/** ++ * tracehook_free_task - task_struct is being freed ++ * @task: dead &struct task_struct being freed ++ * ++ * Called from free_task() when @task is no longer in use. 
++ */ ++static inline void tracehook_free_task(struct task_struct *task) ++{ ++ if (task_utrace_struct(task)) ++ utrace_free_task(task); ++} ++ ++/** + * tracehook_prepare_clone - prepare for new child to be cloned + * @clone_flags: %CLONE_* flags from clone/fork/vfork system call + * +@@ -285,6 +323,8 @@ static inline void tracehook_report_clon + unsigned long clone_flags, + pid_t pid, struct task_struct *child) + { ++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE))) ++ utrace_report_clone(clone_flags, child); + if (unlikely(task_ptrace(child))) { + /* + * It doesn't matter who attached/attaching to this +@@ -317,6 +357,9 @@ static inline void tracehook_report_clon + pid_t pid, + struct task_struct *child) + { ++ if (unlikely(task_utrace_flags(current) & UTRACE_EVENT(CLONE)) && ++ (clone_flags & CLONE_VFORK)) ++ utrace_finish_vfork(current); + if (unlikely(trace)) + ptrace_event(0, trace, pid); + } +@@ -351,6 +394,10 @@ static inline void tracehook_report_vfor + */ + static inline void tracehook_prepare_release_task(struct task_struct *task) + { ++ /* see utrace_add_engine() about this barrier */ ++ smp_mb(); ++ if (task_utrace_flags(task)) ++ utrace_maybe_reap(task, task_utrace_struct(task), true); + } + + /** +@@ -365,6 +412,7 @@ static inline void tracehook_prepare_rel + static inline void tracehook_finish_release_task(struct task_struct *task) + { + ptrace_release_task(task); ++ BUG_ON(task->exit_state != EXIT_DEAD); + } + + /** +@@ -386,6 +434,8 @@ static inline void tracehook_signal_hand + const struct k_sigaction *ka, + struct pt_regs *regs, int stepping) + { ++ if (task_utrace_flags(current)) ++ utrace_signal_handler(current, stepping); + if (stepping && (task_ptrace(current) & PT_PTRACED)) + ptrace_notify(SIGTRAP); + } +@@ -403,6 +453,8 @@ static inline void tracehook_signal_hand + static inline int tracehook_consider_ignored_signal(struct task_struct *task, + int sig) + { ++ if (unlikely(task_utrace_flags(task) & UTRACE_EVENT(SIGNAL_IGN))) 
++ return 1; + return (task_ptrace(task) & PT_PTRACED) != 0; + } + +@@ -422,6 +474,9 @@ static inline int tracehook_consider_ign + static inline int tracehook_consider_fatal_signal(struct task_struct *task, + int sig) + { ++ if (unlikely(task_utrace_flags(task) & (UTRACE_EVENT(SIGNAL_TERM) | ++ UTRACE_EVENT(SIGNAL_CORE)))) ++ return 1; + return (task_ptrace(task) & PT_PTRACED) != 0; + } + +@@ -436,6 +491,8 @@ static inline int tracehook_consider_fat + */ + static inline int tracehook_force_sigpending(void) + { ++ if (unlikely(task_utrace_flags(current))) ++ return utrace_interrupt_pending(); + return 0; + } + +@@ -465,6 +522,8 @@ static inline int tracehook_get_signal(s + siginfo_t *info, + struct k_sigaction *return_ka) + { ++ if (unlikely(task_utrace_flags(task))) ++ return utrace_get_signal(task, regs, info, return_ka); + return 0; + } + +@@ -492,6 +551,8 @@ static inline int tracehook_get_signal(s + */ + static inline int tracehook_notify_jctl(int notify, int why) + { ++ if (task_utrace_flags(current) & UTRACE_EVENT(JCTL)) ++ utrace_report_jctl(notify, why); + return notify ?: task_ptrace(current) ? why : 0; + } + +@@ -502,6 +563,8 @@ static inline int tracehook_notify_jctl( + */ + static inline void tracehook_finish_jctl(void) + { ++ if (task_utrace_flags(current)) ++ utrace_finish_stop(); + } + + #define DEATH_REAP -1 +@@ -524,6 +587,8 @@ static inline void tracehook_finish_jctl + static inline int tracehook_notify_death(struct task_struct *task, + void **death_cookie, int group_dead) + { ++ *death_cookie = task_utrace_struct(task); ++ + if (task_detached(task)) + return task->ptrace ? SIGCHLD : DEATH_REAP; + +@@ -560,6 +625,15 @@ static inline void tracehook_report_deat + int signal, void *death_cookie, + int group_dead) + { ++ /* ++ * If utrace_set_events() was just called to enable ++ * UTRACE_EVENT(DEATH), then we are obliged to call ++ * utrace_report_death() and not miss it. 
utrace_set_events() ++ * checks @task->exit_state under tasklist_lock to synchronize ++ * with exit_notify(), the caller. ++ */ ++ if (task_utrace_flags(task) & _UTRACE_DEATH_EVENTS) ++ utrace_report_death(task, death_cookie, group_dead, signal); + } + + #ifdef TIF_NOTIFY_RESUME +@@ -589,10 +663,21 @@ static inline void set_notify_resume(str + * asynchronously, this will be called again before we return to + * user mode. + * +- * Called without locks. ++ * Called without locks. However, on some machines this may be ++ * called with interrupts disabled. + */ + static inline void tracehook_notify_resume(struct pt_regs *regs) + { ++ struct task_struct *task = current; ++ /* ++ * Prevent the following store/load from getting ahead of the ++ * caller which clears TIF_NOTIFY_RESUME. This pairs with the ++ * implicit mb() before setting TIF_NOTIFY_RESUME in ++ * set_notify_resume(). ++ */ ++ smp_mb(); ++ if (task_utrace_flags(task)) ++ utrace_resume(task, regs); + } + #endif /* TIF_NOTIFY_RESUME */ + +diff --git a/include/linux/utrace.h b/include/linux/utrace.h +new file mode 100644 +index ...f251efe 100644 +--- /dev/null ++++ b/include/linux/utrace.h +@@ -0,0 +1,692 @@ ++/* ++ * utrace infrastructure interface for debugging user processes ++ * ++ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. ++ * ++ * This copyrighted material is made available to anyone wishing to use, ++ * modify, copy, or redistribute it subject to the terms and conditions ++ * of the GNU General Public License v.2. ++ * ++ * Red Hat Author: Roland McGrath. ++ * ++ * This interface allows for notification of interesting events in a ++ * thread. It also mediates access to thread state such as registers. ++ * Multiple unrelated users can be associated with a single thread. ++ * We call each of these a tracing engine. 
++ * ++ * A tracing engine starts by calling utrace_attach_task() or ++ * utrace_attach_pid() on the chosen thread, passing in a set of hooks ++ * (&struct utrace_engine_ops), and some associated data. This produces a ++ * &struct utrace_engine, which is the handle used for all other ++ * operations. An attached engine has its ops vector, its data, and an ++ * event mask controlled by utrace_set_events(). ++ * ++ * For each event bit that is set, that engine will get the ++ * appropriate ops->report_*() callback when the event occurs. The ++ * &struct utrace_engine_ops need not provide callbacks for an event ++ * unless the engine sets one of the associated event bits. ++ */ ++ ++#ifndef _LINUX_UTRACE_H ++#define _LINUX_UTRACE_H 1 ++ ++#include ++#include ++#include ++#include ++ ++struct linux_binprm; ++struct pt_regs; ++struct utrace; ++struct user_regset; ++struct user_regset_view; ++ ++/* ++ * Event bits passed to utrace_set_events(). ++ * These appear in &struct task_struct.@utrace_flags ++ * and &struct utrace_engine.@flags. ++ */ ++enum utrace_events { ++ _UTRACE_EVENT_QUIESCE, /* Thread is available for examination. */ ++ _UTRACE_EVENT_REAP, /* Zombie reaped, no more tracing possible. */ ++ _UTRACE_EVENT_CLONE, /* Successful clone/fork/vfork just done. */ ++ _UTRACE_EVENT_EXEC, /* Successful execve just completed. */ ++ _UTRACE_EVENT_EXIT, /* Thread exit in progress. */ ++ _UTRACE_EVENT_DEATH, /* Thread has died. */ ++ _UTRACE_EVENT_SYSCALL_ENTRY, /* User entered kernel for system call. */ ++ _UTRACE_EVENT_SYSCALL_EXIT, /* Returning to user after system call. */ ++ _UTRACE_EVENT_SIGNAL, /* Signal delivery will run a user handler. */ ++ _UTRACE_EVENT_SIGNAL_IGN, /* No-op signal to be delivered. */ ++ _UTRACE_EVENT_SIGNAL_STOP, /* Signal delivery will suspend. */ ++ _UTRACE_EVENT_SIGNAL_TERM, /* Signal delivery will terminate. */ ++ _UTRACE_EVENT_SIGNAL_CORE, /* Signal delivery will dump core. 
*/ ++ _UTRACE_EVENT_JCTL, /* Job control stop or continue completed. */ ++ _UTRACE_NEVENTS ++}; ++#define UTRACE_EVENT(type) (1UL << _UTRACE_EVENT_##type) ++ ++/* ++ * All the kinds of signal events. ++ * These all use the @report_signal() callback. ++ */ ++#define UTRACE_EVENT_SIGNAL_ALL (UTRACE_EVENT(SIGNAL) \ ++ | UTRACE_EVENT(SIGNAL_IGN) \ ++ | UTRACE_EVENT(SIGNAL_STOP) \ ++ | UTRACE_EVENT(SIGNAL_TERM) \ ++ | UTRACE_EVENT(SIGNAL_CORE)) ++/* ++ * Both kinds of syscall events; these call the @report_syscall_entry() ++ * and @report_syscall_exit() callbacks, respectively. ++ */ ++#define UTRACE_EVENT_SYSCALL \ ++ (UTRACE_EVENT(SYSCALL_ENTRY) | UTRACE_EVENT(SYSCALL_EXIT)) ++ ++/* ++ * The event reports triggered synchronously by task death. ++ */ ++#define _UTRACE_DEATH_EVENTS (UTRACE_EVENT(DEATH) | UTRACE_EVENT(QUIESCE)) ++ ++/* ++ * Hooks in call these entry points to the utrace dispatch. ++ */ ++void utrace_free_task(struct task_struct *); ++bool utrace_interrupt_pending(void); ++void utrace_resume(struct task_struct *, struct pt_regs *); ++void utrace_finish_stop(void); ++void utrace_maybe_reap(struct task_struct *, struct utrace *, bool); ++int utrace_get_signal(struct task_struct *, struct pt_regs *, ++ siginfo_t *, struct k_sigaction *); ++void utrace_report_clone(unsigned long, struct task_struct *); ++void utrace_finish_vfork(struct task_struct *); ++void utrace_report_exit(long *exit_code); ++void utrace_report_death(struct task_struct *, struct utrace *, bool, int); ++void utrace_report_jctl(int notify, int type); ++void utrace_report_exec(struct linux_binfmt *, struct linux_binprm *, ++ struct pt_regs *regs); ++bool utrace_report_syscall_entry(struct pt_regs *); ++void utrace_report_syscall_exit(struct pt_regs *); ++void utrace_signal_handler(struct task_struct *, int); ++ ++#ifndef CONFIG_UTRACE ++ ++/* ++ * uses these accessors to avoid #ifdef CONFIG_UTRACE. 
++ */ ++static inline unsigned long task_utrace_flags(struct task_struct *task) ++{ ++ return 0; ++} ++static inline struct utrace *task_utrace_struct(struct task_struct *task) ++{ ++ return NULL; ++} ++static inline void utrace_init_task(struct task_struct *child) ++{ ++} ++ ++static inline void task_utrace_proc_status(struct seq_file *m, ++ struct task_struct *p) ++{ ++} ++ ++#else /* CONFIG_UTRACE */ ++ ++static inline unsigned long task_utrace_flags(struct task_struct *task) ++{ ++ return task->utrace_flags; ++} ++ ++static inline struct utrace *task_utrace_struct(struct task_struct *task) ++{ ++ struct utrace *utrace; ++ ++ /* ++ * This barrier ensures that any prior load of task->utrace_flags ++ * is ordered before this load of task->utrace. We use those ++ * utrace_flags checks in the hot path to decide to call into ++ * the utrace code. The first attach installs task->utrace before ++ * setting task->utrace_flags nonzero with implicit barrier in ++ * between, see utrace_add_engine(). ++ */ ++ smp_rmb(); ++ utrace = task->utrace; ++ ++ smp_read_barrier_depends(); /* See utrace_task_alloc(). */ ++ return utrace; ++} ++ ++static inline void utrace_init_task(struct task_struct *task) ++{ ++ task->utrace_flags = 0; ++ task->utrace = NULL; ++} ++ ++void task_utrace_proc_status(struct seq_file *m, struct task_struct *p); ++ ++ ++/* ++ * Version number of the API defined in this file. This will change ++ * whenever a tracing engine's code would need some updates to keep ++ * working. We maintain this here for the benefit of tracing engine code ++ * that is developed concurrently with utrace API improvements before they ++ * are merged into the kernel, making LINUX_VERSION_CODE checks unwieldy. ++ */ ++#define UTRACE_API_VERSION 20091216 ++ ++/** ++ * enum utrace_resume_action - engine's choice of action for a traced task ++ * @UTRACE_STOP: Stay quiescent after callbacks. ++ * @UTRACE_INTERRUPT: Make @report_signal() callback soon. 
++ * @UTRACE_REPORT: Make some callback soon. ++ * @UTRACE_SINGLESTEP: Resume in user mode for one instruction. ++ * @UTRACE_BLOCKSTEP: Resume in user mode until next branch. ++ * @UTRACE_RESUME: Resume normally in user mode. ++ * @UTRACE_DETACH: Detach my engine (implies %UTRACE_RESUME). ++ * ++ * See utrace_control() for detailed descriptions of each action. This is ++ * encoded in the @action argument and the return value for every callback ++ * with a &u32 return value. ++ * ++ * The order of these is important. When there is more than one engine, ++ * each supplies its choice and the smallest value prevails. ++ */ ++enum utrace_resume_action { ++ UTRACE_STOP, ++ UTRACE_INTERRUPT, ++ UTRACE_REPORT, ++ UTRACE_SINGLESTEP, ++ UTRACE_BLOCKSTEP, ++ UTRACE_RESUME, ++ UTRACE_DETACH, ++ UTRACE_RESUME_MAX ++}; ++#define UTRACE_RESUME_BITS (ilog2(UTRACE_RESUME_MAX) + 1) ++#define UTRACE_RESUME_MASK ((1 << UTRACE_RESUME_BITS) - 1) ++ ++/** ++ * utrace_resume_action - &enum utrace_resume_action from callback action ++ * @action: &u32 callback @action argument or return value ++ * ++ * This extracts the &enum utrace_resume_action from @action, ++ * which is the @action argument to a &struct utrace_engine_ops ++ * callback or the return value from one. ++ */ ++static inline enum utrace_resume_action utrace_resume_action(u32 action) ++{ ++ return action & UTRACE_RESUME_MASK; ++} ++ ++/** ++ * enum utrace_signal_action - disposition of signal ++ * @UTRACE_SIGNAL_DELIVER: Deliver according to sigaction. ++ * @UTRACE_SIGNAL_IGN: Ignore the signal. ++ * @UTRACE_SIGNAL_TERM: Terminate the process. ++ * @UTRACE_SIGNAL_CORE: Terminate with core dump. ++ * @UTRACE_SIGNAL_STOP: Deliver as absolute stop. ++ * @UTRACE_SIGNAL_TSTP: Deliver as job control stop. ++ * @UTRACE_SIGNAL_REPORT: Reporting before pending signals. ++ * @UTRACE_SIGNAL_HANDLER: Reporting after signal handler setup. 
++ * ++ * This is encoded in the @action argument and the return value for ++ * a @report_signal() callback. It says what will happen to the ++ * signal described by the &siginfo_t parameter to the callback. ++ * ++ * The %UTRACE_SIGNAL_REPORT value is used in an @action argument when ++ * a tracing report is being made before dequeuing any pending signal. ++ * If this is immediately after a signal handler has been set up, then ++ * %UTRACE_SIGNAL_HANDLER is used instead. A @report_signal callback ++ * that uses %UTRACE_SIGNAL_DELIVER|%UTRACE_SINGLESTEP will ensure ++ * it sees a %UTRACE_SIGNAL_HANDLER report. ++ */ ++enum utrace_signal_action { ++ UTRACE_SIGNAL_DELIVER = 0x00, ++ UTRACE_SIGNAL_IGN = 0x10, ++ UTRACE_SIGNAL_TERM = 0x20, ++ UTRACE_SIGNAL_CORE = 0x30, ++ UTRACE_SIGNAL_STOP = 0x40, ++ UTRACE_SIGNAL_TSTP = 0x50, ++ UTRACE_SIGNAL_REPORT = 0x60, ++ UTRACE_SIGNAL_HANDLER = 0x70 ++}; ++#define UTRACE_SIGNAL_MASK 0xf0 ++#define UTRACE_SIGNAL_HOLD 0x100 /* Flag, push signal back on queue. */ ++ ++/** ++ * utrace_signal_action - &enum utrace_signal_action from callback action ++ * @action: @report_signal callback @action argument or return value ++ * ++ * This extracts the &enum utrace_signal_action from @action, which ++ * is the @action argument to a @report_signal callback or the ++ * return value from one. ++ */ ++static inline enum utrace_signal_action utrace_signal_action(u32 action) ++{ ++ return action & UTRACE_SIGNAL_MASK; ++} ++ ++/** ++ * enum utrace_syscall_action - disposition of system call attempt ++ * @UTRACE_SYSCALL_RUN: Run the system call. ++ * @UTRACE_SYSCALL_ABORT: Don't run the system call. ++ * ++ * This is encoded in the @action argument and the return value for ++ * a @report_syscall_entry callback. 
++ */ ++enum utrace_syscall_action { ++ UTRACE_SYSCALL_RUN = 0x00, ++ UTRACE_SYSCALL_ABORT = 0x10 ++}; ++#define UTRACE_SYSCALL_MASK 0xf0 ++#define UTRACE_SYSCALL_RESUMED 0x100 /* Flag, report_syscall_entry() repeats */ ++ ++/** ++ * utrace_syscall_action - &enum utrace_syscall_action from callback action ++ * @action: @report_syscall_entry callback @action or return value ++ * ++ * This extracts the &enum utrace_syscall_action from @action, which ++ * is the @action argument to a @report_syscall_entry callback or the ++ * return value from one. ++ */ ++static inline enum utrace_syscall_action utrace_syscall_action(u32 action) ++{ ++ return action & UTRACE_SYSCALL_MASK; ++} ++ ++/* ++ * Flags for utrace_attach_task() and utrace_attach_pid(). ++ */ ++#define UTRACE_ATTACH_MATCH_OPS 0x0001 /* Match engines on ops. */ ++#define UTRACE_ATTACH_MATCH_DATA 0x0002 /* Match engines on data. */ ++#define UTRACE_ATTACH_MATCH_MASK 0x000f ++#define UTRACE_ATTACH_CREATE 0x0010 /* Attach a new engine. */ ++#define UTRACE_ATTACH_EXCLUSIVE 0x0020 /* Refuse if existing match. */ ++ ++/** ++ * struct utrace_engine - per-engine structure ++ * @ops: &struct utrace_engine_ops pointer passed to utrace_attach_task() ++ * @data: engine-private &void * passed to utrace_attach_task() ++ * @flags: event mask set by utrace_set_events() plus internal flag bits ++ * ++ * The task itself never has to worry about engines detaching while ++ * it's doing event callbacks. These structures are removed from the ++ * task's active list only when it's stopped, or by the task itself. ++ * ++ * utrace_engine_get() and utrace_engine_put() maintain a reference count. ++ * When it drops to zero, the structure is freed. One reference is held ++ * implicitly while the engine is attached to its task. 
++ */ ++struct utrace_engine { ++/* private: */ ++ struct kref kref; ++ void (*release)(void *); ++ struct list_head entry; ++ ++/* public: */ ++ const struct utrace_engine_ops *ops; ++ void *data; ++ ++ unsigned long flags; ++}; ++ ++/** ++ * utrace_engine_get - acquire a reference on a &struct utrace_engine ++ * @engine: &struct utrace_engine pointer ++ * ++ * You must hold a reference on @engine, and you get another. ++ */ ++static inline void utrace_engine_get(struct utrace_engine *engine) ++{ ++ kref_get(&engine->kref); ++} ++ ++void __utrace_engine_release(struct kref *); ++ ++/** ++ * utrace_engine_put - release a reference on a &struct utrace_engine ++ * @engine: &struct utrace_engine pointer ++ * ++ * You must hold a reference on @engine, and you lose that reference. ++ * If it was the last one, @engine becomes an invalid pointer. ++ */ ++static inline void utrace_engine_put(struct utrace_engine *engine) ++{ ++ kref_put(&engine->kref, __utrace_engine_release); ++} ++ ++/** ++ * struct utrace_engine_ops - tracing engine callbacks ++ * ++ * Each @report_*() callback corresponds to an %UTRACE_EVENT(*) bit. ++ * utrace_set_events() calls on @engine choose which callbacks will ++ * be made to @engine from @task. ++ * ++ * Most callbacks take an @action argument, giving the resume action ++ * chosen by other tracing engines. All callbacks take an @engine ++ * argument. The @report_reap callback takes a @task argument that ++ * might or might not be @current. All other @report_* callbacks ++ * report an event in the @current task. ++ * ++ * For some calls, @action also includes bits specific to that event ++ * and utrace_resume_action() is used to extract the resume action. ++ * This shows what would happen if @engine wasn't there, or will if ++ * the callback's return value uses %UTRACE_RESUME. This always ++ * starts as %UTRACE_RESUME when no other tracing is being done on ++ * this task. ++ * ++ * All return values contain &enum utrace_resume_action bits. 
For ++ * some calls, other bits specific to that kind of event are added to ++ * the resume action bits with OR. These are the same bits used in ++ * the @action argument. The resume action returned by a callback ++ * does not override previous engines' choices, it only says what ++ * @engine wants done. What @current actually does is the action that's ++ * most constrained among the choices made by all attached engines. ++ * See utrace_control() for more information on the actions. ++ * ++ * When %UTRACE_STOP is used in @report_syscall_entry, then @current ++ * stops before attempting the system call. In this case, another ++ * @report_syscall_entry callback will follow after @current resumes if ++ * %UTRACE_REPORT or %UTRACE_INTERRUPT was returned by some callback ++ * or passed to utrace_control(). In a second or later callback, ++ * %UTRACE_SYSCALL_RESUMED is set in the @action argument to indicate ++ * a repeat callback still waiting to attempt the same system call ++ * invocation. This repeat callback gives each engine an opportunity ++ * to reexamine registers another engine might have changed while ++ * @current was held in %UTRACE_STOP. ++ * ++ * In other cases, the resume action does not take effect until @current ++ * is ready to check for signals and return to user mode. If there ++ * are more callbacks to be made, the last round of calls determines ++ * the final action. A @report_quiesce callback with @event zero, or ++ * a @report_signal callback, will always be the last one made before ++ * @current resumes. Only %UTRACE_STOP is "sticky"--if @engine returned ++ * %UTRACE_STOP then @current stays stopped unless @engine returns ++ * different from a following callback. ++ * ++ * The report_death() and report_reap() callbacks do not take @action ++ * arguments, and only %UTRACE_DETACH is meaningful in the return value ++ * from a report_death() callback. None of the resume actions applies ++ * to a dead thread. 
++ * ++ * All @report_*() hooks are called with no locks held, in a generally ++ * safe environment when we will be returning to user mode soon (or just ++ * entered the kernel). It is fine to block for memory allocation and ++ * the like, but all hooks are asynchronous and must not block on ++ * external events! If you want the thread to block, use %UTRACE_STOP ++ * in your hook's return value; then later wake it up with utrace_control(). ++ * ++ * @report_quiesce: ++ * Requested by %UTRACE_EVENT(%QUIESCE). ++ * This does not indicate any event, but just that @current is in a ++ * safe place for examination. This call is made before each specific ++ * event callback, except for @report_reap. The @event argument gives ++ * the %UTRACE_EVENT(@which) value for the event occurring. This ++ * callback might be made for events @engine has not requested, if ++ * some other engine is tracing the event; calling utrace_set_events() ++ * call here can request the immediate callback for this occurrence of ++ * @event. @event is zero when there is no other event, @current is ++ * now ready to check for signals and return to user mode, and some ++ * engine has used %UTRACE_REPORT or %UTRACE_INTERRUPT to request this ++ * callback. For this case, if @report_signal is not %NULL, the ++ * @report_quiesce callback may be replaced with a @report_signal ++ * callback passing %UTRACE_SIGNAL_REPORT in its @action argument, ++ * whenever @current is entering the signal-check path anyway. ++ * ++ * @report_signal: ++ * Requested by %UTRACE_EVENT(%SIGNAL_*) or %UTRACE_EVENT(%QUIESCE). ++ * Use utrace_signal_action() and utrace_resume_action() on @action. ++ * The signal action is %UTRACE_SIGNAL_REPORT when some engine has ++ * used %UTRACE_REPORT or %UTRACE_INTERRUPT; the callback can choose ++ * to stop or to deliver an artificial signal, before pending signals. 
++ * It's %UTRACE_SIGNAL_HANDLER instead when signal handler setup just ++ * finished (after a previous %UTRACE_SIGNAL_DELIVER return); this ++ * serves in lieu of any %UTRACE_SIGNAL_REPORT callback requested by ++ * %UTRACE_REPORT or %UTRACE_INTERRUPT, and is also implicitly ++ * requested by %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP into the ++ * signal delivery. The other signal actions indicate a signal about ++ * to be delivered; the previous engine's return value sets the signal ++ * action seen by the the following engine's callback. The @info data ++ * can be changed at will, including @info->si_signo. The settings in ++ * @return_ka determines what %UTRACE_SIGNAL_DELIVER does. @orig_ka ++ * is what was in force before other tracing engines intervened, and ++ * it's %NULL when this report began as %UTRACE_SIGNAL_REPORT or ++ * %UTRACE_SIGNAL_HANDLER. For a report without a new signal, @info ++ * is left uninitialized and must be set completely by an engine that ++ * chooses to deliver a signal; if there was a previous @report_signal ++ * callback ending in %UTRACE_STOP and it was just resumed using ++ * %UTRACE_REPORT or %UTRACE_INTERRUPT, then @info is left unchanged ++ * from the previous callback. In this way, the original signal can ++ * be left in @info while returning %UTRACE_STOP|%UTRACE_SIGNAL_IGN ++ * and then found again when resuming with %UTRACE_INTERRUPT. ++ * The %UTRACE_SIGNAL_HOLD flag bit can be OR'd into the return value, ++ * and might be in @action if the previous engine returned it. This ++ * flag asks that the signal in @info be pushed back on @current's queue ++ * so that it will be seen again after whatever action is taken now. ++ * ++ * @report_clone: ++ * Requested by %UTRACE_EVENT(%CLONE). ++ * Event reported for parent, before the new task @child might run. ++ * @clone_flags gives the flags used in the clone system call, or ++ * equivalent flags for a fork() or vfork() system call. 
This ++ * function can use utrace_attach_task() on @child. Then passing ++ * %UTRACE_STOP to utrace_control() on @child here keeps the child ++ * stopped before it ever runs in user mode, %UTRACE_REPORT or ++ * %UTRACE_INTERRUPT ensures a callback from @child before it ++ * starts in user mode. ++ * ++ * @report_jctl: ++ * Requested by %UTRACE_EVENT(%JCTL). ++ * Job control event; @type is %CLD_STOPPED or %CLD_CONTINUED, ++ * indicating whether we are stopping or resuming now. If @notify ++ * is nonzero, @current is the last thread to stop and so will send ++ * %SIGCHLD to its parent after this callback; @notify reflects ++ * what the parent's %SIGCHLD has in @si_code, which can sometimes ++ * be %CLD_STOPPED even when @type is %CLD_CONTINUED. ++ * ++ * @report_exec: ++ * Requested by %UTRACE_EVENT(%EXEC). ++ * An execve system call has succeeded and the new program is about to ++ * start running. The initial user register state is handy to be tweaked ++ * directly in @regs. @fmt and @bprm gives the details of this exec. ++ * ++ * @report_syscall_entry: ++ * Requested by %UTRACE_EVENT(%SYSCALL_ENTRY). ++ * Thread has entered the kernel to request a system call. ++ * The user register state is handy to be tweaked directly in @regs. ++ * The @action argument contains an &enum utrace_syscall_action, ++ * use utrace_syscall_action() to extract it. The return value ++ * overrides the last engine's action for the system call. ++ * If the final action is %UTRACE_SYSCALL_ABORT, no system call ++ * is made. The details of the system call being attempted can ++ * be fetched here with syscall_get_nr() and syscall_get_arguments(). ++ * The parameter registers can be changed with syscall_set_arguments(). ++ * See above about the %UTRACE_SYSCALL_RESUMED flag in @action. ++ * Use %UTRACE_REPORT in the return value to guarantee you get ++ * another callback (with %UTRACE_SYSCALL_RESUMED flag) in case ++ * @current stops with %UTRACE_STOP before attempting the system call. 
++ * ++ * @report_syscall_exit: ++ * Requested by %UTRACE_EVENT(%SYSCALL_EXIT). ++ * Thread is about to leave the kernel after a system call request. ++ * The user register state is handy to be tweaked directly in @regs. ++ * The results of the system call attempt can be examined here using ++ * syscall_get_error() and syscall_get_return_value(). It is safe ++ * here to call syscall_set_return_value() or syscall_rollback(). ++ * ++ * @report_exit: ++ * Requested by %UTRACE_EVENT(%EXIT). ++ * Thread is exiting and cannot be prevented from doing so, ++ * but all its state is still live. The @code value will be ++ * the wait result seen by the parent, and can be changed by ++ * this engine or others. The @orig_code value is the real ++ * status, not changed by any tracing engine. Returning %UTRACE_STOP ++ * here keeps @current stopped before it cleans up its state and dies, ++ * so it can be examined by other processes. When @current is allowed ++ * to run, it will die and get to the @report_death callback. ++ * ++ * @report_death: ++ * Requested by %UTRACE_EVENT(%DEATH). ++ * Thread is really dead now. It might be reaped by its parent at ++ * any time, or self-reap immediately. Though the actual reaping ++ * may happen in parallel, a report_reap() callback will always be ++ * ordered after a report_death() callback. ++ * ++ * @report_reap: ++ * Requested by %UTRACE_EVENT(%REAP). ++ * Called when someone reaps the dead task (parent, init, or self). ++ * This means the parent called wait, or else this was a detached ++ * thread or a process whose parent ignores SIGCHLD. ++ * No more callbacks are made after this one. ++ * The engine is always detached. ++ * There is nothing more a tracing engine can do about this thread. ++ * After this callback, the @engine pointer will become invalid. ++ * The @task pointer may become invalid if get_task_struct() hasn't ++ * been used to keep it alive. 
++ * An engine should always request this callback if it stores the ++ * @engine pointer or stores any pointer in @engine->data, so it ++ * can clean up its data structures. ++ * Unlike other callbacks, this can be called from the parent's context ++ * rather than from the traced thread itself--it must not delay the ++ * parent by blocking. ++ * ++ * @release: ++ * If not %NULL, this is called after the last utrace_engine_put() ++ * call for a &struct utrace_engine, which could be implicit after ++ * a %UTRACE_DETACH return from another callback. Its argument is ++ * the engine's @data member. ++ */ ++struct utrace_engine_ops { ++ u32 (*report_quiesce)(u32 action, struct utrace_engine *engine, ++ unsigned long event); ++ u32 (*report_signal)(u32 action, struct utrace_engine *engine, ++ struct pt_regs *regs, ++ siginfo_t *info, ++ const struct k_sigaction *orig_ka, ++ struct k_sigaction *return_ka); ++ u32 (*report_clone)(u32 action, struct utrace_engine *engine, ++ unsigned long clone_flags, ++ struct task_struct *child); ++ u32 (*report_jctl)(u32 action, struct utrace_engine *engine, ++ int type, int notify); ++ u32 (*report_exec)(u32 action, struct utrace_engine *engine, ++ const struct linux_binfmt *fmt, ++ const struct linux_binprm *bprm, ++ struct pt_regs *regs); ++ u32 (*report_syscall_entry)(u32 action, struct utrace_engine *engine, ++ struct pt_regs *regs); ++ u32 (*report_syscall_exit)(u32 action, struct utrace_engine *engine, ++ struct pt_regs *regs); ++ u32 (*report_exit)(u32 action, struct utrace_engine *engine, ++ long orig_code, long *code); ++ u32 (*report_death)(struct utrace_engine *engine, ++ bool group_dead, int signal); ++ void (*report_reap)(struct utrace_engine *engine, ++ struct task_struct *task); ++ void (*release)(void *data); ++}; ++ ++/** ++ * struct utrace_examiner - private state for using utrace_prepare_examine() ++ * ++ * The members of &struct utrace_examiner are private to the implementation. 
++ * This data type holds the state from a call to utrace_prepare_examine() ++ * to be used by a call to utrace_finish_examine(). ++ */ ++struct utrace_examiner { ++/* private: */ ++ long state; ++ unsigned long ncsw; ++}; ++ ++/* ++ * These are the exported entry points for tracing engines to use. ++ * See kernel/utrace.c for their kerneldoc comments with interface details. ++ */ ++struct utrace_engine *utrace_attach_task(struct task_struct *, int, ++ const struct utrace_engine_ops *, ++ void *); ++struct utrace_engine *utrace_attach_pid(struct pid *, int, ++ const struct utrace_engine_ops *, ++ void *); ++int __must_check utrace_control(struct task_struct *, ++ struct utrace_engine *, ++ enum utrace_resume_action); ++int __must_check utrace_set_events(struct task_struct *, ++ struct utrace_engine *, ++ unsigned long eventmask); ++int __must_check utrace_barrier(struct task_struct *, ++ struct utrace_engine *); ++int __must_check utrace_prepare_examine(struct task_struct *, ++ struct utrace_engine *, ++ struct utrace_examiner *); ++int __must_check utrace_finish_examine(struct task_struct *, ++ struct utrace_engine *, ++ struct utrace_examiner *); ++ ++/** ++ * utrace_control_pid - control a thread being traced by a tracing engine ++ * @pid: thread to affect ++ * @engine: attached engine to affect ++ * @action: &enum utrace_resume_action for thread to do ++ * ++ * This is the same as utrace_control(), but takes a &struct pid ++ * pointer rather than a &struct task_struct pointer. The caller must ++ * hold a ref on @pid, but does not need to worry about the task ++ * staying valid. If it's been reaped so that @pid points nowhere, ++ * then this call returns -%ESRCH. 
++ */ ++static inline __must_check int utrace_control_pid( ++ struct pid *pid, struct utrace_engine *engine, ++ enum utrace_resume_action action) ++{ ++ /* ++ * We don't bother with rcu_read_lock() here to protect the ++ * task_struct pointer, because utrace_control will return ++ * -ESRCH without looking at that pointer if the engine is ++ * already detached. A task_struct pointer can't die before ++ * all the engines are detached in release_task() first. ++ */ ++ struct task_struct *task = pid_task(pid, PIDTYPE_PID); ++ return unlikely(!task) ? -ESRCH : utrace_control(task, engine, action); ++} ++ ++/** ++ * utrace_set_events_pid - choose which event reports a tracing engine gets ++ * @pid: thread to affect ++ * @engine: attached engine to affect ++ * @eventmask: new event mask ++ * ++ * This is the same as utrace_set_events(), but takes a &struct pid ++ * pointer rather than a &struct task_struct pointer. The caller must ++ * hold a ref on @pid, but does not need to worry about the task ++ * staying valid. If it's been reaped so that @pid points nowhere, ++ * then this call returns -%ESRCH. ++ */ ++static inline __must_check int utrace_set_events_pid( ++ struct pid *pid, struct utrace_engine *engine, unsigned long eventmask) ++{ ++ struct task_struct *task = pid_task(pid, PIDTYPE_PID); ++ return unlikely(!task) ? -ESRCH : ++ utrace_set_events(task, engine, eventmask); ++} ++ ++/** ++ * utrace_barrier_pid - synchronize with simultaneous tracing callbacks ++ * @pid: thread to affect ++ * @engine: engine to affect (can be detached) ++ * ++ * This is the same as utrace_barrier(), but takes a &struct pid ++ * pointer rather than a &struct task_struct pointer. The caller must ++ * hold a ref on @pid, but does not need to worry about the task ++ * staying valid. If it's been reaped so that @pid points nowhere, ++ * then this call returns -%ESRCH. 
++ */ ++static inline __must_check int utrace_barrier_pid(struct pid *pid, ++ struct utrace_engine *engine) ++{ ++ struct task_struct *task = pid_task(pid, PIDTYPE_PID); ++ return unlikely(!task) ? -ESRCH : utrace_barrier(task, engine); ++} ++ ++#endif /* CONFIG_UTRACE */ ++ ++#endif /* linux/utrace.h */ +diff --git a/init/Kconfig b/init/Kconfig +index eb77e8c..b849517 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -320,6 +320,15 @@ config AUDIT_TREE + depends on AUDITSYSCALL + select INOTIFY + ++config UTRACE ++ bool "Infrastructure for tracing and debugging user processes" ++ depends on EXPERIMENTAL ++ depends on HAVE_ARCH_TRACEHOOK ++ help ++ Enable the utrace process tracing interface. This is an internal ++ kernel interface exported to kernel modules, to track events in ++ user threads, extract and change user thread state. ++ + menu "RCU Subsystem" + + choice +diff --git a/kernel/Makefile b/kernel/Makefile +index a987aa1..8bbb631 100644 +--- a/kernel/Makefile ++++ b/kernel/Makefile +@@ -70,6 +70,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o + obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o + obj-$(CONFIG_STOP_MACHINE) += stop_machine.o + obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o ++obj-$(CONFIG_UTRACE) += utrace.o + obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o + obj-$(CONFIG_AUDITSYSCALL) += auditsc.o + obj-$(CONFIG_GCOV_KERNEL) += gcov/ +diff --git a/kernel/fork.c b/kernel/fork.c +index 4c14942..16030e3 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -161,6 +161,7 @@ void free_task(struct task_struct *tsk) + free_thread_info(tsk->stack); + rt_mutex_debug_task_free(tsk); + ftrace_graph_exit_task(tsk); ++ tracehook_free_task(tsk); + free_task_struct(tsk); + } + EXPORT_SYMBOL(free_task); +@@ -1000,6 +1001,8 @@ static struct task_struct *copy_process( + if (!p) + goto fork_out; + ++ tracehook_init_task(p); ++ + ftrace_graph_init_task(p); + + rt_mutex_init_task(p); +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 067f120..0ad4dc0 
100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -165,6 +166,14 @@ bool ptrace_may_access(struct task_struc + return !err; + } + ++/* ++ * For experimental use of utrace, exclude ptrace on the same task. ++ */ ++static inline bool exclude_ptrace(struct task_struct *task) ++{ ++ return unlikely(!!task_utrace_flags(task)); ++} ++ + int ptrace_attach(struct task_struct *task) + { + int retval; +@@ -188,6 +197,8 @@ int ptrace_attach(struct task_struct *ta + + task_lock(task); + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); ++ if (!retval && exclude_ptrace(task)) ++ retval = -EBUSY; + task_unlock(task); + if (retval) + goto unlock_creds; +@@ -225,6 +236,9 @@ int ptrace_traceme(void) + { + int ret = -EPERM; + ++ if (exclude_ptrace(current)) /* XXX locking */ ++ return -EBUSY; ++ + write_lock_irq(&tasklist_lock); + /* Are we already being traced? */ + if (!current->ptrace) { +diff --git a/kernel/utrace.c b/kernel/utrace.c +new file mode 100644 +index ...f003e34 100644 +--- /dev/null ++++ b/kernel/utrace.c +@@ -0,0 +1,2436 @@ ++/* ++ * utrace infrastructure interface for debugging user processes ++ * ++ * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved. ++ * ++ * This copyrighted material is made available to anyone wishing to use, ++ * modify, copy, or redistribute it subject to the terms and conditions ++ * of the GNU General Public License v.2. ++ * ++ * Red Hat Author: Roland McGrath. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++/* ++ * Per-thread structure private to utrace implementation. ++ * If task_struct.utrace_flags is nonzero, task_struct.utrace ++ * has always been allocated first. Once allocated, it is ++ * never freed until free_task(). 
++ * ++ * The common event reporting loops are done by the task making the ++ * report without ever taking any locks. To facilitate this, the two ++ * lists @attached and @attaching work together for smooth asynchronous ++ * attaching with low overhead. Modifying either list requires @lock. ++ * The @attaching list can be modified any time while holding @lock. ++ * New engines being attached always go on this list. ++ * ++ * The @attached list is what the task itself uses for its reporting ++ * loops. When the task itself is not quiescent, it can use the ++ * @attached list without taking any lock. Nobody may modify the list ++ * when the task is not quiescent. When it is quiescent, that means ++ * that it won't run again without taking @lock itself before using ++ * the list. ++ * ++ * At each place where we know the task is quiescent (or it's current), ++ * while holding @lock, we call splice_attaching(), below. This moves ++ * the @attaching list members on to the end of the @attached list. ++ * Since this happens at the start of any reporting pass, any new ++ * engines attached asynchronously go on the stable @attached list ++ * in time to have their callbacks seen. 
++ */ ++struct utrace { ++ spinlock_t lock; ++ struct list_head attached, attaching; ++ ++ struct task_struct *cloning; ++ ++ struct utrace_engine *reporting; ++ ++ enum utrace_resume_action resume:UTRACE_RESUME_BITS; ++ unsigned int signal_handler:1; ++ unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */ ++ unsigned int death:1; /* in utrace_report_death() now */ ++ unsigned int reap:1; /* release_task() has run */ ++ unsigned int pending_attach:1; /* need splice_attaching() */ ++}; ++ ++static struct kmem_cache *utrace_cachep; ++static struct kmem_cache *utrace_engine_cachep; ++static const struct utrace_engine_ops utrace_detached_ops; /* forward decl */ ++ ++static int __init utrace_init(void) ++{ ++ utrace_cachep = KMEM_CACHE(utrace, SLAB_PANIC); ++ utrace_engine_cachep = KMEM_CACHE(utrace_engine, SLAB_PANIC); ++ return 0; ++} ++module_init(utrace_init); ++ ++/* ++ * Set up @task.utrace for the first time. We can have races ++ * between two utrace_attach_task() calls here. The task_lock() ++ * governs installing the new pointer. If another one got in first, ++ * we just punt the new one we allocated. ++ * ++ * This returns false only in case of a memory allocation failure. ++ */ ++static bool utrace_task_alloc(struct task_struct *task) ++{ ++ struct utrace *utrace = kmem_cache_zalloc(utrace_cachep, GFP_KERNEL); ++ if (unlikely(!utrace)) ++ return false; ++ spin_lock_init(&utrace->lock); ++ INIT_LIST_HEAD(&utrace->attached); ++ INIT_LIST_HEAD(&utrace->attaching); ++ utrace->resume = UTRACE_RESUME; ++ task_lock(task); ++ if (likely(!task->utrace)) { ++ /* ++ * This barrier makes sure the initialization of the struct ++ * precedes the installation of the pointer. This pairs ++ * with smp_read_barrier_depends() in task_utrace_struct(). 
++ */ ++ smp_wmb(); ++ task->utrace = utrace; ++ } ++ task_unlock(task); ++ ++ if (unlikely(task->utrace != utrace)) ++ kmem_cache_free(utrace_cachep, utrace); ++ return true; ++} ++ ++/* ++ * This is called via tracehook_free_task() from free_task() ++ * when @task is being deallocated. ++ */ ++void utrace_free_task(struct task_struct *task) ++{ ++ kmem_cache_free(utrace_cachep, task->utrace); ++} ++ ++/* ++ * This is called when the task is safely quiescent, i.e. it won't consult ++ * utrace->attached without the lock. Move any engines attached ++ * asynchronously from @utrace->attaching onto the @utrace->attached list. ++ */ ++static void splice_attaching(struct utrace *utrace) ++{ ++ lockdep_assert_held(&utrace->lock); ++ list_splice_tail_init(&utrace->attaching, &utrace->attached); ++ utrace->pending_attach = 0; ++} ++ ++/* ++ * This is the exported function used by the utrace_engine_put() inline. ++ */ ++void __utrace_engine_release(struct kref *kref) ++{ ++ struct utrace_engine *engine = container_of(kref, struct utrace_engine, ++ kref); ++ BUG_ON(!list_empty(&engine->entry)); ++ if (engine->release) ++ (*engine->release)(engine->data); ++ kmem_cache_free(utrace_engine_cachep, engine); ++} ++EXPORT_SYMBOL_GPL(__utrace_engine_release); ++ ++static bool engine_matches(struct utrace_engine *engine, int flags, ++ const struct utrace_engine_ops *ops, void *data) ++{ ++ if ((flags & UTRACE_ATTACH_MATCH_OPS) && engine->ops != ops) ++ return false; ++ if ((flags & UTRACE_ATTACH_MATCH_DATA) && engine->data != data) ++ return false; ++ return engine->ops && engine->ops != &utrace_detached_ops; ++} ++ ++static struct utrace_engine *find_matching_engine( ++ struct utrace *utrace, int flags, ++ const struct utrace_engine_ops *ops, void *data) ++{ ++ struct utrace_engine *engine; ++ list_for_each_entry(engine, &utrace->attached, entry) ++ if (engine_matches(engine, flags, ops, data)) ++ return engine; ++ list_for_each_entry(engine, &utrace->attaching, entry) ++ if 
(engine_matches(engine, flags, ops, data)) ++ return engine; ++ return NULL; ++} ++ ++/* ++ * Enqueue @engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE. ++ */ ++static int utrace_add_engine(struct task_struct *target, ++ struct utrace *utrace, ++ struct utrace_engine *engine, ++ int flags, ++ const struct utrace_engine_ops *ops, ++ void *data) ++{ ++ int ret; ++ ++ spin_lock(&utrace->lock); ++ ++ ret = -EEXIST; ++ if ((flags & UTRACE_ATTACH_EXCLUSIVE) && ++ unlikely(find_matching_engine(utrace, flags, ops, data))) ++ goto unlock; ++ ++ /* ++ * In case we had no engines before, make sure that ++ * utrace_flags is not zero. Since we did unlock+lock ++ * at least once after utrace_task_alloc() installed ++ * ->utrace, we have the necessary barrier which pairs ++ * with rmb() in task_utrace_struct(). ++ */ ++ ret = -ESRCH; ++ if (!target->utrace_flags) { ++ target->utrace_flags = UTRACE_EVENT(REAP); ++ /* ++ * If we race with tracehook_prepare_release_task() ++ * make sure that either it sees utrace_flags != 0 ++ * or we see exit_state == EXIT_DEAD. ++ */ ++ smp_mb(); ++ if (unlikely(target->exit_state == EXIT_DEAD)) { ++ target->utrace_flags = 0; ++ goto unlock; ++ } ++ } ++ ++ /* ++ * Put the new engine on the pending ->attaching list. ++ * Make sure it gets onto the ->attached list by the next ++ * time it's examined. Setting ->pending_attach ensures ++ * that start_report() takes the lock and splices the lists ++ * before the next new reporting pass. ++ * ++ * When target == current, it would be safe just to call ++ * splice_attaching() right here. But if we're inside a ++ * callback, that would mean the new engine also gets ++ * notified about the event that precipitated its own ++ * creation. This is not what the user wants. 
++ */ ++ list_add_tail(&engine->entry, &utrace->attaching); ++ utrace->pending_attach = 1; ++ ret = 0; ++unlock: ++ spin_unlock(&utrace->lock); ++ ++ return ret; ++} ++ ++/** ++ * utrace_attach_task - attach new engine, or look up an attached engine ++ * @target: thread to attach to ++ * @flags: flag bits combined with OR, see below ++ * @ops: callback table for new engine ++ * @data: engine private data pointer ++ * ++ * The caller must ensure that the @target thread does not get freed, ++ * i.e. hold a ref or be its parent. It is always safe to call this ++ * on @current, or on the @child pointer in a @report_clone callback. ++ * For most other cases, it's easier to use utrace_attach_pid() instead. ++ * ++ * UTRACE_ATTACH_CREATE: ++ * Create a new engine. If %UTRACE_ATTACH_CREATE is not specified, you ++ * only look up an existing engine already attached to the thread. ++ * ++ * UTRACE_ATTACH_EXCLUSIVE: ++ * Attempting to attach a second (matching) engine fails with -%EEXIST. ++ * ++ * UTRACE_ATTACH_MATCH_OPS: Only consider engines matching @ops. ++ * UTRACE_ATTACH_MATCH_DATA: Only consider engines matching @data. ++ * ++ * Calls with neither %UTRACE_ATTACH_MATCH_OPS nor %UTRACE_ATTACH_MATCH_DATA ++ * match the first among any engines attached to @target. That means that ++ * %UTRACE_ATTACH_EXCLUSIVE in such a call fails with -%EEXIST if there ++ * are any engines on @target at all. 
++ */ ++struct utrace_engine *utrace_attach_task( ++ struct task_struct *target, int flags, ++ const struct utrace_engine_ops *ops, void *data) ++{ ++ struct utrace *utrace = task_utrace_struct(target); ++ struct utrace_engine *engine; ++ int ret; ++ ++ if (!(flags & UTRACE_ATTACH_CREATE)) { ++ if (unlikely(!utrace)) ++ return ERR_PTR(-ENOENT); ++ spin_lock(&utrace->lock); ++ engine = find_matching_engine(utrace, flags, ops, data); ++ if (engine) ++ utrace_engine_get(engine); ++ spin_unlock(&utrace->lock); ++ return engine ?: ERR_PTR(-ENOENT); ++ } ++ ++ if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops)) ++ return ERR_PTR(-EINVAL); ++ ++ if (unlikely(target->flags & PF_KTHREAD)) ++ /* ++ * Silly kernel, utrace is for users! ++ */ ++ return ERR_PTR(-EPERM); ++ ++ if (!utrace) { ++ if (unlikely(!utrace_task_alloc(target))) ++ return ERR_PTR(-ENOMEM); ++ utrace = task_utrace_struct(target); ++ } ++ ++ engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL); ++ if (unlikely(!engine)) ++ return ERR_PTR(-ENOMEM); ++ ++ /* ++ * Initialize the new engine structure. It starts out with two ++ * refs: one ref to return, and one ref for being attached. ++ */ ++ kref_set(&engine->kref, 2); ++ engine->flags = 0; ++ engine->ops = ops; ++ engine->data = data; ++ engine->release = ops->release; ++ ++ ret = utrace_add_engine(target, utrace, engine, flags, ops, data); ++ ++ if (unlikely(ret)) { ++ kmem_cache_free(utrace_engine_cachep, engine); ++ engine = ERR_PTR(ret); ++ } ++ ++ return engine; ++} ++EXPORT_SYMBOL_GPL(utrace_attach_task); ++ ++/** ++ * utrace_attach_pid - attach new engine, or look up an attached engine ++ * @pid: &struct pid pointer representing thread to attach to ++ * @flags: flag bits combined with OR, see utrace_attach_task() ++ * @ops: callback table for new engine ++ * @data: engine private data pointer ++ * ++ * This is the same as utrace_attach_task(), but takes a &struct pid ++ * pointer rather than a &struct task_struct pointer. 
The caller must ++ * hold a ref on @pid, but does not need to worry about the task ++ * staying valid. If it's been reaped so that @pid points nowhere, ++ * then this call returns -%ESRCH. ++ */ ++struct utrace_engine *utrace_attach_pid( ++ struct pid *pid, int flags, ++ const struct utrace_engine_ops *ops, void *data) ++{ ++ struct utrace_engine *engine = ERR_PTR(-ESRCH); ++ struct task_struct *task = get_pid_task(pid, PIDTYPE_PID); ++ if (task) { ++ engine = utrace_attach_task(task, flags, ops, data); ++ put_task_struct(task); ++ } ++ return engine; ++} ++EXPORT_SYMBOL_GPL(utrace_attach_pid); ++ ++/* ++ * When an engine is detached, the target thread may still see it and ++ * make callbacks until it quiesces. We install a special ops vector ++ * with these two callbacks. When the target thread quiesces, it can ++ * safely free the engine itself. For any event we will always get ++ * the report_quiesce() callback first, so we only need this one ++ * pointer to be set. The only exception is report_reap(), so we ++ * supply that callback too. ++ */ ++static u32 utrace_detached_quiesce(u32 action, struct utrace_engine *engine, ++ unsigned long event) ++{ ++ return UTRACE_DETACH; ++} ++ ++static void utrace_detached_reap(struct utrace_engine *engine, ++ struct task_struct *task) ++{ ++} ++ ++static const struct utrace_engine_ops utrace_detached_ops = { ++ .report_quiesce = &utrace_detached_quiesce, ++ .report_reap = &utrace_detached_reap ++}; ++ ++/* ++ * The caller has to hold a ref on the engine. If the attached flag is ++ * true (all but utrace_barrier() calls), the engine is supposed to be ++ * attached. If the attached flag is false (utrace_barrier() only), ++ * then return -ERESTARTSYS for an engine marked for detach but not yet ++ * fully detached. The task pointer can be invalid if the engine is ++ * detached. ++ * ++ * Get the utrace lock for the target task. ++ * Returns the struct if locked, or ERR_PTR(-errno). 
++ * ++ * This has to be robust against races with: ++ * utrace_control(target, UTRACE_DETACH) calls ++ * UTRACE_DETACH after reports ++ * utrace_report_death ++ * utrace_release_task ++ */ ++static struct utrace *get_utrace_lock(struct task_struct *target, ++ struct utrace_engine *engine, ++ bool attached) ++ __acquires(utrace->lock) ++{ ++ struct utrace *utrace; ++ ++ rcu_read_lock(); ++ ++ /* ++ * If this engine was already detached, bail out before we look at ++ * the task_struct pointer at all. If it's detached after this ++ * check, then RCU is still keeping this task_struct pointer valid. ++ * ++ * The ops pointer is NULL when the engine is fully detached. ++ * It's &utrace_detached_ops when it's marked detached but still ++ * on the list. In the latter case, utrace_barrier() still works, ++ * since the target might be in the middle of an old callback. ++ */ ++ if (unlikely(!engine->ops)) { ++ rcu_read_unlock(); ++ return ERR_PTR(-ESRCH); ++ } ++ ++ if (unlikely(engine->ops == &utrace_detached_ops)) { ++ rcu_read_unlock(); ++ return attached ? ERR_PTR(-ESRCH) : ERR_PTR(-ERESTARTSYS); ++ } ++ ++ utrace = task_utrace_struct(target); ++ spin_lock(&utrace->lock); ++ if (unlikely(!engine->ops) || ++ unlikely(engine->ops == &utrace_detached_ops)) { ++ /* ++ * By the time we got the utrace lock, ++ * it had been reaped or detached already. ++ */ ++ spin_unlock(&utrace->lock); ++ utrace = ERR_PTR(-ESRCH); ++ if (!attached && engine->ops == &utrace_detached_ops) ++ utrace = ERR_PTR(-ERESTARTSYS); ++ } ++ rcu_read_unlock(); ++ ++ return utrace; ++} ++ ++/* ++ * Now that we don't hold any locks, run through any ++ * detached engines and free their references. Each ++ * engine had one implicit ref while it was attached. 
++ */ ++static void put_detached_list(struct list_head *list) ++{ ++ struct utrace_engine *engine, *next; ++ list_for_each_entry_safe(engine, next, list, entry) { ++ list_del_init(&engine->entry); ++ utrace_engine_put(engine); ++ } ++} ++ ++/* ++ * We use an extra bit in utrace_engine.flags past the event bits, ++ * to record whether the engine is keeping the target thread stopped. ++ * ++ * This bit is set in task_struct.utrace_flags whenever it is set in any ++ * engine's flags. Only utrace_reset() resets it in utrace_flags. ++ */ ++#define ENGINE_STOP (1UL << _UTRACE_NEVENTS) ++ ++static void mark_engine_wants_stop(struct task_struct *task, ++ struct utrace_engine *engine) ++{ ++ engine->flags |= ENGINE_STOP; ++ task->utrace_flags |= ENGINE_STOP; ++} ++ ++static void clear_engine_wants_stop(struct utrace_engine *engine) ++{ ++ engine->flags &= ~ENGINE_STOP; ++} ++ ++static bool engine_wants_stop(struct utrace_engine *engine) ++{ ++ return (engine->flags & ENGINE_STOP) != 0; ++} ++ ++/** ++ * utrace_set_events - choose which event reports a tracing engine gets ++ * @target: thread to affect ++ * @engine: attached engine to affect ++ * @events: new event mask ++ * ++ * This changes the set of events for which @engine wants callbacks made. ++ * ++ * This fails with -%EALREADY and does nothing if you try to clear ++ * %UTRACE_EVENT(%DEATH) when the @report_death callback may already have ++ * begun, if you try to clear %UTRACE_EVENT(%REAP) when the @report_reap ++ * callback may already have begun, or if you try to newly set ++ * %UTRACE_EVENT(%DEATH) or %UTRACE_EVENT(%QUIESCE) when @target is ++ * already dead or dying. ++ * ++ * This can fail with -%ESRCH when @target has already been detached, ++ * including forcible detach on reaping. 
++ * ++ * If @target was stopped before the call, then after a successful call, ++ * no event callbacks not requested in @events will be made; if ++ * %UTRACE_EVENT(%QUIESCE) is included in @events, then a ++ * @report_quiesce callback will be made when @target resumes. ++ * ++ * If @target was not stopped and @events excludes some bits that were ++ * set before, this can return -%EINPROGRESS to indicate that @target ++ * may have been making some callback to @engine. When this returns ++ * zero, you can be sure that no event callbacks you've disabled in ++ * @events can be made. If @events only sets new bits that were not set ++ * before on @engine, then -%EINPROGRESS will never be returned. ++ * ++ * To synchronize after an -%EINPROGRESS return, see utrace_barrier(). ++ * ++ * When @target is @current, -%EINPROGRESS is not returned. But note ++ * that a newly-created engine will not receive any callbacks related to ++ * an event notification already in progress. This call enables @events ++ * callbacks to be made as soon as @engine becomes eligible for any ++ * callbacks, see utrace_attach_task(). ++ * ++ * These rules provide for coherent synchronization based on %UTRACE_STOP, ++ * even when %SIGKILL is breaking its normal simple rules. ++ */ ++int utrace_set_events(struct task_struct *target, ++ struct utrace_engine *engine, ++ unsigned long events) ++{ ++ struct utrace *utrace; ++ unsigned long old_flags, old_utrace_flags; ++ int ret; ++ ++ /* ++ * We just ignore the internal bit, so callers can use ++ * engine->flags to seed bitwise ops for our argument. 
++ */ ++ events &= ~ENGINE_STOP; ++ ++ utrace = get_utrace_lock(target, engine, true); ++ if (unlikely(IS_ERR(utrace))) ++ return PTR_ERR(utrace); ++ ++ old_utrace_flags = target->utrace_flags; ++ old_flags = engine->flags & ~ENGINE_STOP; ++ ++ if (target->exit_state && ++ (((events & ~old_flags) & _UTRACE_DEATH_EVENTS) || ++ (utrace->death && ++ ((old_flags & ~events) & _UTRACE_DEATH_EVENTS)) || ++ (utrace->reap && ((old_flags & ~events) & UTRACE_EVENT(REAP))))) { ++ spin_unlock(&utrace->lock); ++ return -EALREADY; ++ } ++ ++ /* ++ * When setting these flags, it's essential that we really ++ * synchronize with exit_notify(). They cannot be set after ++ * exit_notify() takes the tasklist_lock. By holding the read ++ * lock here while setting the flags, we ensure that the calls ++ * to tracehook_notify_death() and tracehook_report_death() will ++ * see the new flags. This ensures that utrace_release_task() ++ * knows positively that utrace_report_death() will be called or ++ * that it won't. ++ */ ++ if ((events & ~old_utrace_flags) & _UTRACE_DEATH_EVENTS) { ++ read_lock(&tasklist_lock); ++ if (unlikely(target->exit_state)) { ++ read_unlock(&tasklist_lock); ++ spin_unlock(&utrace->lock); ++ return -EALREADY; ++ } ++ target->utrace_flags |= events; ++ read_unlock(&tasklist_lock); ++ } ++ ++ engine->flags = events | (engine->flags & ENGINE_STOP); ++ target->utrace_flags |= events; ++ ++ if ((events & UTRACE_EVENT_SYSCALL) && ++ !(old_utrace_flags & UTRACE_EVENT_SYSCALL)) ++ set_tsk_thread_flag(target, TIF_SYSCALL_TRACE); ++ ++ ret = 0; ++ if ((old_flags & ~events) && target != current && ++ !task_is_stopped_or_traced(target) && !target->exit_state) { ++ /* ++ * This barrier ensures that our engine->flags changes ++ * have hit before we examine utrace->reporting, ++ * pairing with the barrier in start_callback(). If ++ * @target has not yet hit finish_callback() to clear ++ * utrace->reporting, we might be in the middle of a ++ * callback to @engine. 
++ */ ++ smp_mb(); ++ if (utrace->reporting == engine) ++ ret = -EINPROGRESS; ++ } ++ ++ spin_unlock(&utrace->lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_set_events); ++ ++/* ++ * Asynchronously mark an engine as being detached. ++ * ++ * This must work while the target thread races with us doing ++ * start_callback(), defined below. It uses smp_rmb() between checking ++ * @engine->flags and using @engine->ops. Here we change @engine->ops ++ * first, then use smp_wmb() before changing @engine->flags. This ensures ++ * it can check the old flags before using the old ops, or check the old ++ * flags before using the new ops, or check the new flags before using the ++ * new ops, but can never check the new flags before using the old ops. ++ * Hence, utrace_detached_ops might be used with any old flags in place. ++ * It has report_quiesce() and report_reap() callbacks to handle all cases. ++ */ ++static void mark_engine_detached(struct utrace_engine *engine) ++{ ++ engine->ops = &utrace_detached_ops; ++ smp_wmb(); ++ engine->flags = UTRACE_EVENT(QUIESCE); ++} ++ ++/* ++ * Get @target to stop and return true if it is already stopped now. ++ * If we return false, it will make some event callback soonish. ++ * Called with @utrace locked. ++ */ ++static bool utrace_do_stop(struct task_struct *target, struct utrace *utrace) ++{ ++ if (task_is_stopped(target)) { ++ /* ++ * Stopped is considered quiescent; when it wakes up, it will ++ * go through utrace_finish_stop() before doing anything else. ++ */ ++ spin_lock_irq(&target->sighand->siglock); ++ if (likely(task_is_stopped(target))) ++ __set_task_state(target, TASK_TRACED); ++ spin_unlock_irq(&target->sighand->siglock); ++ } else if (utrace->resume > UTRACE_REPORT) { ++ utrace->resume = UTRACE_REPORT; ++ set_notify_resume(target); ++ } ++ ++ return task_is_traced(target); ++} ++ ++/* ++ * If the target is not dead it should not be in tracing ++ * stop any more. Wake it unless it's in job control stop. 
++ */ ++static void utrace_wakeup(struct task_struct *target, struct utrace *utrace) ++{ ++ lockdep_assert_held(&utrace->lock); ++ spin_lock_irq(&target->sighand->siglock); ++ if (target->signal->flags & SIGNAL_STOP_STOPPED || ++ target->signal->group_stop_count) ++ target->state = TASK_STOPPED; ++ else ++ wake_up_state(target, __TASK_TRACED); ++ spin_unlock_irq(&target->sighand->siglock); ++} ++ ++/* ++ * This is called when there might be some detached engines on the list or ++ * some stale bits in @task->utrace_flags. Clean them up and recompute the ++ * flags. Returns true if we're now fully detached. ++ * ++ * Called with @utrace->lock held, returns with it released. ++ * After this returns, @utrace might be freed if everything detached. ++ */ ++static bool utrace_reset(struct task_struct *task, struct utrace *utrace) ++ __releases(utrace->lock) ++{ ++ struct utrace_engine *engine, *next; ++ unsigned long flags = 0; ++ LIST_HEAD(detached); ++ ++ splice_attaching(utrace); ++ ++ /* ++ * Update the set of events of interest from the union ++ * of the interests of the remaining tracing engines. ++ * For any engine marked detached, remove it from the list. ++ * We'll collect them on the detached list. ++ */ ++ list_for_each_entry_safe(engine, next, &utrace->attached, entry) { ++ if (engine->ops == &utrace_detached_ops) { ++ engine->ops = NULL; ++ list_move(&engine->entry, &detached); ++ } else { ++ flags |= engine->flags | UTRACE_EVENT(REAP); ++ } ++ } ++ ++ if (task->exit_state) { ++ /* ++ * Once it's already dead, we never install any flags ++ * except REAP. When ->exit_state is set and events ++ * like DEATH are not set, then they never can be set. ++ * This ensures that utrace_release_task() knows ++ * positively that utrace_report_death() can never run. 
++ */ ++ BUG_ON(utrace->death); ++ flags &= UTRACE_EVENT(REAP); ++ } else if (!(flags & UTRACE_EVENT_SYSCALL) && ++ test_tsk_thread_flag(task, TIF_SYSCALL_TRACE)) { ++ clear_tsk_thread_flag(task, TIF_SYSCALL_TRACE); ++ } ++ ++ if (!flags) { ++ /* ++ * No more engines, cleared out the utrace. ++ */ ++ utrace->resume = UTRACE_RESUME; ++ utrace->signal_handler = 0; ++ } ++ ++ /* ++ * If no more engines want it stopped, wake it up. ++ */ ++ if (task_is_traced(task) && !(flags & ENGINE_STOP)) ++ utrace_wakeup(task, utrace); ++ ++ /* ++ * In theory spin_lock() doesn't imply rcu_read_lock(). ++ * Once we clear ->utrace_flags this task_struct can go away ++ * because tracehook_prepare_release_task() path does not take ++ * utrace->lock when ->utrace_flags == 0. ++ */ ++ rcu_read_lock(); ++ task->utrace_flags = flags; ++ spin_unlock(&utrace->lock); ++ rcu_read_unlock(); ++ ++ put_detached_list(&detached); ++ ++ return !flags; ++} ++ ++void utrace_finish_stop(void) ++{ ++ /* ++ * If we were task_is_traced() and then SIGKILL'ed, make ++ * sure we do nothing until the tracer drops utrace->lock. ++ */ ++ if (unlikely(__fatal_signal_pending(current))) { ++ struct utrace *utrace = task_utrace_struct(current); ++ spin_unlock_wait(&utrace->lock); ++ } ++} ++ ++/* ++ * Perform %UTRACE_STOP, i.e. block in TASK_TRACED until woken up. ++ * @task == current, @utrace == current->utrace, which is not locked. ++ * We may return early if woken up by SIGKILL even though some utrace ++ * engine may still want us to stay stopped. ++ */ ++static void utrace_stop(struct task_struct *task, struct utrace *utrace, ++ enum utrace_resume_action action) ++{ ++relock: ++ spin_lock(&utrace->lock); ++ ++ if (action < utrace->resume) { ++ /* ++ * Ensure a reporting pass when we're resumed. 
++ */ ++ utrace->resume = action; ++ if (action == UTRACE_INTERRUPT) ++ set_thread_flag(TIF_SIGPENDING); ++ else ++ set_thread_flag(TIF_NOTIFY_RESUME); ++ } ++ ++ /* ++ * If the ENGINE_STOP bit is clear in utrace_flags, that means ++ * utrace_reset() ran after we processed some UTRACE_STOP return ++ * values from callbacks to get here. If all engines have detached ++ * or resumed us, we don't stop. This check doesn't require ++ * siglock, but it should follow the interrupt/report bookkeeping ++ * steps (this can matter for UTRACE_RESUME but not UTRACE_DETACH). ++ */ ++ if (unlikely(!(task->utrace_flags & ENGINE_STOP))) { ++ utrace_reset(task, utrace); ++ if (task->utrace_flags & ENGINE_STOP) ++ goto relock; ++ return; ++ } ++ ++ /* ++ * The siglock protects us against signals. As well as SIGKILL ++ * waking us up, we must synchronize with the signal bookkeeping ++ * for stop signals and SIGCONT. ++ */ ++ spin_lock_irq(&task->sighand->siglock); ++ ++ if (unlikely(__fatal_signal_pending(task))) { ++ spin_unlock_irq(&task->sighand->siglock); ++ spin_unlock(&utrace->lock); ++ return; ++ } ++ ++ __set_current_state(TASK_TRACED); ++ ++ /* ++ * If there is a group stop in progress, ++ * we must participate in the bookkeeping. ++ */ ++ if (unlikely(task->signal->group_stop_count) && ++ !--task->signal->group_stop_count) ++ task->signal->flags = SIGNAL_STOP_STOPPED; ++ ++ spin_unlock_irq(&task->sighand->siglock); ++ spin_unlock(&utrace->lock); ++ ++ schedule(); ++ ++ utrace_finish_stop(); ++ ++ /* ++ * While in TASK_TRACED, we were considered "frozen enough". ++ * Now that we woke up, it's crucial if we're supposed to be ++ * frozen that we freeze now before running anything substantial. ++ */ ++ try_to_freeze(); ++ ++ /* ++ * While we were in TASK_TRACED, complete_signal() considered ++ * us "uninterested" in signal wakeups. Now make sure our ++ * TIF_SIGPENDING state is correct for normal running. 
++ */ ++ spin_lock_irq(&task->sighand->siglock); ++ recalc_sigpending(); ++ spin_unlock_irq(&task->sighand->siglock); ++} ++ ++/* ++ * Called by release_task() with @reap set to true. ++ * Called by utrace_report_death() with @reap set to false. ++ * On reap, make report_reap callbacks and clean out @utrace ++ * unless still making callbacks. On death, update bookkeeping ++ * and handle the reap work if release_task() came in first. ++ */ ++void utrace_maybe_reap(struct task_struct *target, struct utrace *utrace, ++ bool reap) ++{ ++ struct utrace_engine *engine, *next; ++ struct list_head attached; ++ ++ spin_lock(&utrace->lock); ++ ++ if (reap) { ++ /* ++ * If the target will do some final callbacks but hasn't ++ * finished them yet, we know because it clears these event ++ * bits after it's done. Instead of cleaning up here and ++ * requiring utrace_report_death() to cope with it, we ++ * delay the REAP report and the teardown until after the ++ * target finishes its death reports. ++ */ ++ utrace->reap = 1; ++ ++ if (target->utrace_flags & _UTRACE_DEATH_EVENTS) { ++ spin_unlock(&utrace->lock); ++ return; ++ } ++ } else { ++ /* ++ * After we unlock with this flag clear, any competing ++ * utrace_control/utrace_set_events calls know that we've ++ * finished our callbacks and any detach bookkeeping. ++ */ ++ utrace->death = 0; ++ ++ if (!utrace->reap) { ++ /* ++ * We're just dead, not reaped yet. This will ++ * reset @target->utrace_flags so the later call ++ * with @reap set won't hit the check above. ++ */ ++ utrace_reset(target, utrace); ++ return; ++ } ++ } ++ ++ /* ++ * utrace_add_engine() checks ->utrace_flags != 0. Since ++ * @utrace->reap is set, nobody can set or clear UTRACE_EVENT(REAP) ++ * in @engine->flags or change @engine->ops and nobody can change ++ * @utrace->attached after we drop the lock. 
++ */ ++ target->utrace_flags = 0; ++ ++ /* ++ * We clear out @utrace->attached before we drop the lock so ++ * that find_matching_engine() can't come across any old engine ++ * while we are busy tearing it down. ++ */ ++ list_replace_init(&utrace->attached, &attached); ++ list_splice_tail_init(&utrace->attaching, &attached); ++ ++ spin_unlock(&utrace->lock); ++ ++ list_for_each_entry_safe(engine, next, &attached, entry) { ++ if (engine->flags & UTRACE_EVENT(REAP)) ++ engine->ops->report_reap(engine, target); ++ ++ engine->ops = NULL; ++ engine->flags = 0; ++ list_del_init(&engine->entry); ++ ++ utrace_engine_put(engine); ++ } ++} ++ ++/* ++ * You can't do anything to a dead task but detach it. ++ * If release_task() has been called, you can't do that. ++ * ++ * On the exit path, DEATH and QUIESCE event bits are set only ++ * before utrace_report_death() has taken the lock. At that point, ++ * the death report will come soon, so disallow detach until it's ++ * done. This prevents us from racing with it detaching itself. ++ * ++ * Called only when @target->exit_state is nonzero. ++ */ ++static inline int utrace_control_dead(struct task_struct *target, ++ struct utrace *utrace, ++ enum utrace_resume_action action) ++{ ++ lockdep_assert_held(&utrace->lock); ++ ++ if (action != UTRACE_DETACH || unlikely(utrace->reap)) ++ return -ESRCH; ++ ++ if (unlikely(utrace->death)) ++ /* ++ * We have already started the death report. We can't ++ * prevent the report_death and report_reap callbacks, ++ * so tell the caller they will happen. ++ */ ++ return -EALREADY; ++ ++ return 0; ++} ++ ++/** ++ * utrace_control - control a thread being traced by a tracing engine ++ * @target: thread to affect ++ * @engine: attached engine to affect ++ * @action: &enum utrace_resume_action for thread to do ++ * ++ * This is how a tracing engine asks a traced thread to do something. 
++ * This call is controlled by the @action argument, which has the ++ * same meaning as the &enum utrace_resume_action value returned by ++ * event reporting callbacks. ++ * ++ * If @target is already dead (@target->exit_state nonzero), ++ * all actions except %UTRACE_DETACH fail with -%ESRCH. ++ * ++ * The following sections describe each option for the @action argument. ++ * ++ * UTRACE_DETACH: ++ * ++ * After this, the @engine data structure is no longer accessible, ++ * and the thread might be reaped. The thread will start running ++ * again if it was stopped and no longer has any attached engines ++ * that want it stopped. ++ * ++ * If the @report_reap callback may already have begun, this fails ++ * with -%ESRCH. If the @report_death callback may already have ++ * begun, this fails with -%EALREADY. ++ * ++ * If @target is not already stopped, then a callback to this engine ++ * might be in progress or about to start on another CPU. If so, ++ * then this returns -%EINPROGRESS; the detach happens as soon as ++ * the pending callback is finished. To synchronize after an ++ * -%EINPROGRESS return, see utrace_barrier(). ++ * ++ * If @target is properly stopped before utrace_control() is called, ++ * then after successful return it's guaranteed that no more callbacks ++ * to the @engine->ops vector will be made. ++ * ++ * The only exception is %SIGKILL (and exec or group-exit by another ++ * thread in the group), which can cause asynchronous @report_death ++ * and/or @report_reap callbacks even when %UTRACE_STOP was used. ++ * (In that event, this fails with -%ESRCH or -%EALREADY, see above.) ++ * ++ * UTRACE_STOP: ++ * ++ * This asks that @target stop running. This returns 0 only if ++ * @target is already stopped, either for tracing or for job ++ * control. 
Then @target will remain stopped until another ++ * utrace_control() call is made on @engine; @target can be woken ++ * only by %SIGKILL (or equivalent, such as exec or termination by ++ * another thread in the same thread group). ++ * ++ * This returns -%EINPROGRESS if @target is not already stopped. ++ * Then the effect is like %UTRACE_REPORT. A @report_quiesce or ++ * @report_signal callback will be made soon. Your callback can ++ * then return %UTRACE_STOP to keep @target stopped. ++ * ++ * This does not interrupt system calls in progress, including ones ++ * that sleep for a long time. For that, use %UTRACE_INTERRUPT. ++ * To interrupt system calls and then keep @target stopped, your ++ * @report_signal callback can return %UTRACE_STOP. ++ * ++ * UTRACE_RESUME: ++ * ++ * Just let @target continue running normally, reversing the effect ++ * of a previous %UTRACE_STOP. If another engine is keeping @target ++ * stopped, then it remains stopped until all engines let it resume. ++ * If @target was not stopped, this has no effect. ++ * ++ * UTRACE_REPORT: ++ * ++ * This is like %UTRACE_RESUME, but also ensures that there will be ++ * a @report_quiesce or @report_signal callback made soon. If ++ * @target had been stopped, then there will be a callback before it ++ * resumes running normally. If another engine is keeping @target ++ * stopped, then there might be no callbacks until all engines let ++ * it resume. ++ * ++ * Since this is meaningless unless @report_quiesce callbacks will ++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). ++ * ++ * UTRACE_INTERRUPT: ++ * ++ * This is like %UTRACE_REPORT, but ensures that @target will make a ++ * @report_signal callback before it resumes or delivers signals. ++ * If @target was in a system call or about to enter one, work in ++ * progress will be interrupted as if by %SIGSTOP. If another ++ * engine is keeping @target stopped, then there might be no ++ * callbacks until all engines let it resume. 
++ * ++ * This gives @engine an opportunity to introduce a forced signal ++ * disposition via its @report_signal callback. ++ * ++ * UTRACE_SINGLESTEP: ++ * ++ * It's invalid to use this unless arch_has_single_step() returned true. ++ * This is like %UTRACE_RESUME, but resumes for one user instruction only. ++ * ++ * Note that passing %UTRACE_SINGLESTEP or %UTRACE_BLOCKSTEP to ++ * utrace_control() or returning it from an event callback alone does ++ * not necessarily ensure that stepping will be enabled. If there are ++ * more callbacks made to any engine before returning to user mode, ++ * then the resume action is chosen only by the last set of callbacks. ++ * To be sure, enable %UTRACE_EVENT(%QUIESCE) and look for the ++ * @report_quiesce callback with a zero event mask, or the ++ * @report_signal callback with %UTRACE_SIGNAL_REPORT. ++ * ++ * Since this is not robust unless @report_quiesce callbacks will ++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). ++ * ++ * UTRACE_BLOCKSTEP: ++ * ++ * It's invalid to use this unless arch_has_block_step() returned true. ++ * This is like %UTRACE_SINGLESTEP, but resumes for one whole basic ++ * block of user instructions. ++ * ++ * Since this is not robust unless @report_quiesce callbacks will ++ * be made, it returns -%EINVAL if @engine lacks %UTRACE_EVENT(%QUIESCE). ++ * ++ * %UTRACE_BLOCKSTEP devolves to %UTRACE_SINGLESTEP when another ++ * tracing engine is using %UTRACE_SINGLESTEP at the same time. ++ */ ++int utrace_control(struct task_struct *target, ++ struct utrace_engine *engine, ++ enum utrace_resume_action action) ++{ ++ struct utrace *utrace; ++ bool reset; ++ int ret; ++ ++ if (unlikely(action >= UTRACE_RESUME_MAX)) { ++ WARN(1, "invalid action argument to utrace_control()!"); ++ return -EINVAL; ++ } ++ ++ /* ++ * This is a sanity check for a programming error in the caller. 
++ * Their request can only work properly in all cases by relying on ++ * a follow-up callback, but they didn't set one up! This check ++ * doesn't do locking, but it shouldn't matter. The caller has to ++ * be synchronously sure the callback is set up to be operating the ++ * interface properly. ++ */ ++ if (action >= UTRACE_REPORT && action < UTRACE_RESUME && ++ unlikely(!(engine->flags & UTRACE_EVENT(QUIESCE)))) { ++ WARN(1, "utrace_control() with no QUIESCE callback in place!"); ++ return -EINVAL; ++ } ++ ++ utrace = get_utrace_lock(target, engine, true); ++ if (unlikely(IS_ERR(utrace))) ++ return PTR_ERR(utrace); ++ ++ reset = task_is_traced(target); ++ ret = 0; ++ ++ /* ++ * ->exit_state can change under us, this doesn't matter. ++ * We do not care about ->exit_state in fact, but we do ++ * care about ->reap and ->death. If either flag is set, ++ * we must also see ->exit_state != 0. ++ */ ++ if (unlikely(target->exit_state)) { ++ ret = utrace_control_dead(target, utrace, action); ++ if (ret) { ++ spin_unlock(&utrace->lock); ++ return ret; ++ } ++ reset = true; ++ } ++ ++ switch (action) { ++ case UTRACE_STOP: ++ mark_engine_wants_stop(target, engine); ++ if (!reset && !utrace_do_stop(target, utrace)) ++ ret = -EINPROGRESS; ++ reset = false; ++ break; ++ ++ case UTRACE_DETACH: ++ if (engine_wants_stop(engine)) ++ target->utrace_flags &= ~ENGINE_STOP; ++ mark_engine_detached(engine); ++ reset = reset || utrace_do_stop(target, utrace); ++ if (!reset) { ++ /* ++ * As in utrace_set_events(), this barrier ensures ++ * that our engine->flags changes have hit before we ++ * examine utrace->reporting, pairing with the barrier ++ * in start_callback(). If @target has not yet hit ++ * finish_callback() to clear utrace->reporting, we ++ * might be in the middle of a callback to @engine. ++ */ ++ smp_mb(); ++ if (utrace->reporting == engine) ++ ret = -EINPROGRESS; ++ } ++ break; ++ ++ case UTRACE_RESUME: ++ /* ++ * This and all other cases imply resuming if stopped. 
++ * There might not be another report before it just ++ * resumes, so make sure single-step is not left set. ++ */ ++ clear_engine_wants_stop(engine); ++ if (likely(reset)) ++ user_disable_single_step(target); ++ break; ++ ++ case UTRACE_BLOCKSTEP: ++ /* ++ * Resume from stopped, step one block. ++ * We fall through to treat it like UTRACE_SINGLESTEP. ++ */ ++ if (unlikely(!arch_has_block_step())) { ++ WARN(1, "UTRACE_BLOCKSTEP when !arch_has_block_step()"); ++ action = UTRACE_SINGLESTEP; ++ } ++ ++ case UTRACE_SINGLESTEP: ++ /* ++ * Resume from stopped, step one instruction. ++ * We fall through to the UTRACE_REPORT case. ++ */ ++ if (unlikely(!arch_has_single_step())) { ++ WARN(1, ++ "UTRACE_SINGLESTEP when !arch_has_single_step()"); ++ reset = false; ++ ret = -EOPNOTSUPP; ++ break; ++ } ++ ++ case UTRACE_REPORT: ++ /* ++ * Make the thread call tracehook_notify_resume() soon. ++ * But don't bother if it's already been interrupted. ++ * In that case, utrace_get_signal() will be reporting soon. ++ */ ++ clear_engine_wants_stop(engine); ++ if (action < utrace->resume) { ++ utrace->resume = action; ++ set_notify_resume(target); ++ } ++ break; ++ ++ case UTRACE_INTERRUPT: ++ /* ++ * Make the thread call tracehook_get_signal() soon. ++ */ ++ clear_engine_wants_stop(engine); ++ if (utrace->resume == UTRACE_INTERRUPT) ++ break; ++ utrace->resume = UTRACE_INTERRUPT; ++ ++ /* ++ * If it's not already stopped, interrupt it now. We need ++ * the siglock here in case it calls recalc_sigpending() ++ * and clears its own TIF_SIGPENDING. By taking the lock, ++ * we've serialized any later recalc_sigpending() after our ++ * setting of utrace->resume to force it on. ++ */ ++ if (reset) { ++ /* ++ * This is really just to keep the invariant that ++ * TIF_SIGPENDING is set with UTRACE_INTERRUPT. ++ * When it's stopped, we know it's always going ++ * through utrace_get_signal() and will recalculate. 
++ */ ++ set_tsk_thread_flag(target, TIF_SIGPENDING); ++ } else { ++ struct sighand_struct *sighand; ++ unsigned long irqflags; ++ sighand = lock_task_sighand(target, &irqflags); ++ if (likely(sighand)) { ++ signal_wake_up(target, 0); ++ unlock_task_sighand(target, &irqflags); ++ } ++ } ++ break; ++ ++ default: ++ BUG(); /* We checked it on entry. */ ++ } ++ ++ /* ++ * Let the thread resume running. If it's not stopped now, ++ * there is nothing more we need to do. ++ */ ++ if (reset) ++ utrace_reset(target, utrace); ++ else ++ spin_unlock(&utrace->lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_control); ++ ++/** ++ * utrace_barrier - synchronize with simultaneous tracing callbacks ++ * @target: thread to affect ++ * @engine: engine to affect (can be detached) ++ * ++ * This blocks while @target might be in the midst of making a callback to ++ * @engine. It can be interrupted by signals and will return -%ERESTARTSYS. ++ * A return value of zero means no callback from @target to @engine was ++ * in progress. Any effect of its return value (such as %UTRACE_STOP) has ++ * already been applied to @engine. ++ * ++ * It's not necessary to keep the @target pointer alive for this call. ++ * It's only necessary to hold a ref on @engine. This will return ++ * safely even if @target has been reaped and has no task refs. ++ * ++ * A successful return from utrace_barrier() guarantees its ordering ++ * with respect to utrace_set_events() and utrace_control() calls. If ++ * @target was not properly stopped, event callbacks just disabled might ++ * still be in progress; utrace_barrier() waits until there is no chance ++ * an unwanted callback can be in progress. 
++ */ ++int utrace_barrier(struct task_struct *target, struct utrace_engine *engine) ++{ ++ struct utrace *utrace; ++ int ret = -ERESTARTSYS; ++ ++ if (unlikely(target == current)) ++ return 0; ++ ++ do { ++ utrace = get_utrace_lock(target, engine, false); ++ if (unlikely(IS_ERR(utrace))) { ++ ret = PTR_ERR(utrace); ++ if (ret != -ERESTARTSYS) ++ break; ++ } else { ++ /* ++ * All engine state changes are done while ++ * holding the lock, i.e. before we get here. ++ * Since we have the lock, we only need to ++ * worry about @target making a callback. ++ * When it has entered start_callback() but ++ * not yet gotten to finish_callback(), we ++ * will see utrace->reporting == @engine. ++ * When @target doesn't take the lock, it uses ++ * barriers to order setting utrace->reporting ++ * before it examines the engine state. ++ */ ++ if (utrace->reporting != engine) ++ ret = 0; ++ spin_unlock(&utrace->lock); ++ if (!ret) ++ break; ++ } ++ schedule_timeout_interruptible(1); ++ } while (!signal_pending(current)); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_barrier); ++ ++/* ++ * This is local state used for reporting loops, perhaps optimized away. ++ */ ++struct utrace_report { ++ u32 result; ++ enum utrace_resume_action action; ++ enum utrace_resume_action resume_action; ++ bool detaches; ++ bool spurious; ++}; ++ ++#define INIT_REPORT(var) \ ++ struct utrace_report var = { \ ++ .action = UTRACE_RESUME, \ ++ .resume_action = UTRACE_RESUME, \ ++ .spurious = true \ ++ } ++ ++/* ++ * We are now making the report, so clear the flag saying we need one. ++ * When there is a new attach, ->pending_attach is set just so we will ++ * know to do splice_attaching() here before the callback loop. 
++ */ ++static enum utrace_resume_action start_report(struct utrace *utrace) ++{ ++ enum utrace_resume_action resume = utrace->resume; ++ if (utrace->pending_attach || ++ (resume > UTRACE_INTERRUPT && resume < UTRACE_RESUME)) { ++ spin_lock(&utrace->lock); ++ splice_attaching(utrace); ++ resume = utrace->resume; ++ if (resume > UTRACE_INTERRUPT) ++ utrace->resume = UTRACE_RESUME; ++ spin_unlock(&utrace->lock); ++ } ++ return resume; ++} ++ ++static inline void finish_report_reset(struct task_struct *task, ++ struct utrace *utrace, ++ struct utrace_report *report) ++{ ++ if (unlikely(report->spurious || report->detaches)) { ++ spin_lock(&utrace->lock); ++ if (utrace_reset(task, utrace)) ++ report->action = UTRACE_RESUME; ++ } ++} ++ ++/* ++ * Complete a normal reporting pass, pairing with a start_report() call. ++ * This handles any UTRACE_DETACH or UTRACE_REPORT or UTRACE_INTERRUPT ++ * returns from engine callbacks. If @will_not_stop is true and any ++ * engine's last callback used UTRACE_STOP, we do UTRACE_REPORT here to ++ * ensure we stop before user mode. If there were no callbacks made, it ++ * will recompute @task->utrace_flags to avoid another false-positive. ++ */ ++static void finish_report(struct task_struct *task, struct utrace *utrace, ++ struct utrace_report *report, bool will_not_stop) ++{ ++ enum utrace_resume_action resume = report->action; ++ ++ if (resume == UTRACE_STOP) ++ resume = will_not_stop ? 
UTRACE_REPORT : UTRACE_RESUME; ++ ++ if (resume < utrace->resume) { ++ spin_lock(&utrace->lock); ++ utrace->resume = resume; ++ if (resume == UTRACE_INTERRUPT) ++ set_tsk_thread_flag(task, TIF_SIGPENDING); ++ else ++ set_tsk_thread_flag(task, TIF_NOTIFY_RESUME); ++ spin_unlock(&utrace->lock); ++ } ++ ++ finish_report_reset(task, utrace, report); ++} ++ ++static void finish_callback_report(struct task_struct *task, ++ struct utrace *utrace, ++ struct utrace_report *report, ++ struct utrace_engine *engine, ++ enum utrace_resume_action action) ++{ ++ if (action == UTRACE_DETACH) { ++ /* ++ * By holding the lock here, we make sure that ++ * utrace_barrier() (really get_utrace_lock()) sees the ++ * effect of this detach. Otherwise utrace_barrier() could ++ * return 0 after this callback had returned UTRACE_DETACH. ++ * This way, a 0 return is an unambiguous indicator that any ++ * callback returning UTRACE_DETACH has indeed caused detach. ++ */ ++ spin_lock(&utrace->lock); ++ engine->ops = &utrace_detached_ops; ++ spin_unlock(&utrace->lock); ++ } ++ ++ /* ++ * If utrace_control() was used, treat that like UTRACE_DETACH here. ++ */ ++ if (engine->ops == &utrace_detached_ops) { ++ report->detaches = true; ++ return; ++ } ++ ++ if (action < report->action) ++ report->action = action; ++ ++ if (action != UTRACE_STOP) { ++ if (action < report->resume_action) ++ report->resume_action = action; ++ ++ if (engine_wants_stop(engine)) { ++ spin_lock(&utrace->lock); ++ clear_engine_wants_stop(engine); ++ spin_unlock(&utrace->lock); ++ } ++ ++ return; ++ } ++ ++ if (!engine_wants_stop(engine)) { ++ spin_lock(&utrace->lock); ++ /* ++ * If utrace_control() came in and detached us ++ * before we got the lock, we must not stop now. ++ */ ++ if (unlikely(engine->ops == &utrace_detached_ops)) ++ report->detaches = true; ++ else ++ mark_engine_wants_stop(task, engine); ++ spin_unlock(&utrace->lock); ++ } ++} ++ ++/* ++ * Apply the return value of one engine callback to @report. 
++ * Returns true if @engine detached and should not get any more callbacks. ++ */ ++static bool finish_callback(struct task_struct *task, struct utrace *utrace, ++ struct utrace_report *report, ++ struct utrace_engine *engine, ++ u32 ret) ++{ ++ report->result = ret & ~UTRACE_RESUME_MASK; ++ finish_callback_report(task, utrace, report, engine, ++ utrace_resume_action(ret)); ++ ++ /* ++ * Now that we have applied the effect of the return value, ++ * clear this so that utrace_barrier() can stop waiting. ++ * A subsequent utrace_control() can stop or resume @engine ++ * and know this was ordered after its callback's action. ++ * ++ * We don't need any barriers here because utrace_barrier() ++ * takes utrace->lock. If we touched engine->flags above, ++ * the lock guaranteed this change was before utrace_barrier() ++ * examined utrace->reporting. ++ */ ++ utrace->reporting = NULL; ++ ++ /* ++ * We've just done an engine callback. These are allowed to sleep, ++ * though all well-behaved ones restrict that to blocking kalloc() ++ * or quickly-acquired mutex_lock() and the like. This is a good ++ * place to make sure tracing engines don't introduce too much ++ * latency under voluntary preemption. ++ */ ++ might_sleep(); ++ ++ return engine->ops == &utrace_detached_ops; ++} ++ ++/* ++ * Start the callbacks for @engine to consider @event (a bit mask). ++ * This makes the report_quiesce() callback first. If @engine wants ++ * a specific callback for @event, we return the ops vector to use. ++ * If not, we return NULL. The return value from the ops->callback ++ * function called should be passed to finish_callback(). 
++ */ ++static const struct utrace_engine_ops *start_callback( ++ struct utrace *utrace, struct utrace_report *report, ++ struct utrace_engine *engine, struct task_struct *task, ++ unsigned long event) ++{ ++ const struct utrace_engine_ops *ops; ++ unsigned long want; ++ ++ /* ++ * This barrier ensures that we've set utrace->reporting before ++ * we examine engine->flags or engine->ops. utrace_barrier() ++ * relies on this ordering to indicate that the effect of any ++ * utrace_control() and utrace_set_events() calls is in place ++ * by the time utrace->reporting can be seen to be NULL. ++ */ ++ utrace->reporting = engine; ++ smp_mb(); ++ ++ /* ++ * This pairs with the barrier in mark_engine_detached(). ++ * It makes sure that we never see the old ops vector with ++ * the new flags, in case the original vector had no report_quiesce. ++ */ ++ want = engine->flags; ++ smp_rmb(); ++ ops = engine->ops; ++ ++ if (want & UTRACE_EVENT(QUIESCE)) { ++ if (finish_callback(task, utrace, report, engine, ++ (*ops->report_quiesce)(report->action, ++ engine, event))) ++ return NULL; ++ ++ /* ++ * finish_callback() reset utrace->reporting after the ++ * quiesce callback. Now we set it again (as above) ++ * before re-examining engine->flags, which could have ++ * been changed synchronously by ->report_quiesce or ++ * asynchronously by utrace_control() or utrace_set_events(). ++ */ ++ utrace->reporting = engine; ++ smp_mb(); ++ want = engine->flags; ++ } ++ ++ if (want & ENGINE_STOP) ++ report->action = UTRACE_STOP; ++ ++ if (want & event) { ++ report->spurious = false; ++ return ops; ++ } ++ ++ utrace->reporting = NULL; ++ return NULL; ++} ++ ++/* ++ * Do a normal reporting pass for engines interested in @event. ++ * @callback is the name of the member in the ops vector, and remaining ++ * args are the extras it takes after the standard three args. ++ */ ++#define REPORT_CALLBACKS(rev, task, utrace, report, event, callback, ...) 
\ ++ do { \ ++ struct utrace_engine *engine; \ ++ const struct utrace_engine_ops *ops; \ ++ list_for_each_entry##rev(engine, &utrace->attached, entry) { \ ++ ops = start_callback(utrace, report, engine, task, \ ++ event); \ ++ if (!ops) \ ++ continue; \ ++ finish_callback(task, utrace, report, engine, \ ++ (*ops->callback)(__VA_ARGS__)); \ ++ } \ ++ } while (0) ++#define REPORT(task, utrace, report, event, callback, ...) \ ++ do { \ ++ start_report(utrace); \ ++ REPORT_CALLBACKS(, task, utrace, report, event, callback, \ ++ (report)->action, engine, ## __VA_ARGS__); \ ++ finish_report(task, utrace, report, true); \ ++ } while (0) ++ ++/* ++ * Called iff UTRACE_EVENT(EXEC) flag is set. ++ */ ++void utrace_report_exec(struct linux_binfmt *fmt, struct linux_binprm *bprm, ++ struct pt_regs *regs) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ REPORT(task, utrace, &report, UTRACE_EVENT(EXEC), ++ report_exec, fmt, bprm, regs); ++} ++ ++static u32 do_report_syscall_entry(struct pt_regs *regs, ++ struct task_struct *task, ++ struct utrace *utrace, ++ struct utrace_report *report, ++ u32 resume_report) ++{ ++ start_report(utrace); ++ REPORT_CALLBACKS(_reverse, task, utrace, report, ++ UTRACE_EVENT(SYSCALL_ENTRY), report_syscall_entry, ++ resume_report | report->result | report->action, ++ engine, regs); ++ finish_report(task, utrace, report, false); ++ ++ if (report->action != UTRACE_STOP) ++ return 0; ++ ++ utrace_stop(task, utrace, report->resume_action); ++ ++ if (fatal_signal_pending(task)) { ++ /* ++ * We are continuing despite UTRACE_STOP because of a ++ * SIGKILL. Don't let the system call actually proceed. ++ */ ++ report->result = UTRACE_SYSCALL_ABORT; ++ } else if (utrace->resume <= UTRACE_REPORT) { ++ /* ++ * If we've been asked for another report after our stop, ++ * go back to report (and maybe stop) again before we run ++ * the system call. 
The second (and later) reports are ++ * marked with the UTRACE_SYSCALL_RESUMED flag so that ++ * engines know this is a second report at the same ++ * entry. This gives them the chance to examine the ++ * registers anew after they might have been changed ++ * while we were stopped. ++ */ ++ report->detaches = false; ++ report->spurious = true; ++ report->action = report->resume_action = UTRACE_RESUME; ++ return UTRACE_SYSCALL_RESUMED; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Called iff UTRACE_EVENT(SYSCALL_ENTRY) flag is set. ++ * Return true to prevent the system call. ++ */ ++bool utrace_report_syscall_entry(struct pt_regs *regs) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ u32 resume_report = 0; ++ ++ do { ++ resume_report = do_report_syscall_entry(regs, task, utrace, ++ &report, resume_report); ++ } while (resume_report); ++ ++ return utrace_syscall_action(report.result) == UTRACE_SYSCALL_ABORT; ++} ++ ++/* ++ * Called iff UTRACE_EVENT(SYSCALL_EXIT) flag is set. ++ */ ++void utrace_report_syscall_exit(struct pt_regs *regs) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_EXIT), ++ report_syscall_exit, regs); ++} ++ ++/* ++ * Called iff UTRACE_EVENT(CLONE) flag is set. ++ * This notification call blocks the wake_up_new_task call on the child. ++ * So we must not quiesce here. tracehook_report_clone_complete will do ++ * a quiescence check momentarily. ++ */ ++void utrace_report_clone(unsigned long clone_flags, struct task_struct *child) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ /* ++ * We don't use the REPORT() macro here, because we need ++ * to clear utrace->cloning before finish_report(). 
++ * After finish_report(), utrace can be a stale pointer ++ * in cases when report.action is still UTRACE_RESUME. ++ */ ++ start_report(utrace); ++ utrace->cloning = child; ++ ++ REPORT_CALLBACKS(, task, utrace, &report, ++ UTRACE_EVENT(CLONE), report_clone, ++ report.action, engine, clone_flags, child); ++ ++ utrace->cloning = NULL; ++ finish_report(task, utrace, &report, !(clone_flags & CLONE_VFORK)); ++ ++ /* ++ * For a vfork, we will go into an uninterruptible block waiting ++ * for the child. We need UTRACE_STOP to happen before this, not ++ * after. For CLONE_VFORK, utrace_finish_vfork() will be called. ++ */ ++ if (report.action == UTRACE_STOP && (clone_flags & CLONE_VFORK)) { ++ spin_lock(&utrace->lock); ++ utrace->vfork_stop = 1; ++ spin_unlock(&utrace->lock); ++ } ++} ++ ++/* ++ * We're called after utrace_report_clone() for a CLONE_VFORK. ++ * If UTRACE_STOP was left from the clone report, we stop here. ++ * After this, we'll enter the uninterruptible wait_for_completion() ++ * waiting for the child. ++ */ ++void utrace_finish_vfork(struct task_struct *task) ++{ ++ struct utrace *utrace = task_utrace_struct(task); ++ ++ if (utrace->vfork_stop) { ++ spin_lock(&utrace->lock); ++ utrace->vfork_stop = 0; ++ spin_unlock(&utrace->lock); ++ utrace_stop(task, utrace, UTRACE_RESUME); /* XXX */ ++ } ++} ++ ++/* ++ * Called iff UTRACE_EVENT(JCTL) flag is set. ++ * ++ * Called with siglock held. ++ */ ++void utrace_report_jctl(int notify, int what) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ ++ spin_unlock_irq(&task->sighand->siglock); ++ ++ REPORT(task, utrace, &report, UTRACE_EVENT(JCTL), ++ report_jctl, what, notify); ++ ++ spin_lock_irq(&task->sighand->siglock); ++} ++ ++/* ++ * Called iff UTRACE_EVENT(EXIT) flag is set. 
++ */ ++void utrace_report_exit(long *exit_code) ++{ ++ struct task_struct *task = current; ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ long orig_code = *exit_code; ++ ++ REPORT(task, utrace, &report, UTRACE_EVENT(EXIT), ++ report_exit, orig_code, exit_code); ++ ++ if (report.action == UTRACE_STOP) ++ utrace_stop(task, utrace, report.resume_action); ++} ++ ++/* ++ * Called iff UTRACE_EVENT(DEATH) or UTRACE_EVENT(QUIESCE) flag is set. ++ * ++ * It is always possible that we are racing with utrace_release_task here. ++ * For this reason, utrace_release_task checks for the event bits that get ++ * us here, and delays its cleanup for us to do. ++ */ ++void utrace_report_death(struct task_struct *task, struct utrace *utrace, ++ bool group_dead, int signal) ++{ ++ INIT_REPORT(report); ++ ++ BUG_ON(!task->exit_state); ++ ++ /* ++ * We are presently considered "quiescent"--which is accurate ++ * inasmuch as we won't run any more user instructions ever again. ++ * But for utrace_control and utrace_set_events to be robust, they ++ * must be sure whether or not we will run any more callbacks. If ++ * a call comes in before we do, taking the lock here synchronizes ++ * us so we don't run any callbacks just disabled. Calls that come ++ * in while we're running the callbacks will see the exit.death ++ * flag and know that we are not yet fully quiescent for purposes ++ * of detach bookkeeping. ++ */ ++ spin_lock(&utrace->lock); ++ BUG_ON(utrace->death); ++ utrace->death = 1; ++ utrace->resume = UTRACE_RESUME; ++ splice_attaching(utrace); ++ spin_unlock(&utrace->lock); ++ ++ REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH), ++ report_death, engine, group_dead, signal); ++ ++ utrace_maybe_reap(task, utrace, false); ++} ++ ++/* ++ * Finish the last reporting pass before returning to user mode. 
++ */ ++static void finish_resume_report(struct task_struct *task, ++ struct utrace *utrace, ++ struct utrace_report *report) ++{ ++ finish_report_reset(task, utrace, report); ++ ++ switch (report->action) { ++ case UTRACE_STOP: ++ utrace_stop(task, utrace, report->resume_action); ++ break; ++ ++ case UTRACE_INTERRUPT: ++ if (!signal_pending(task)) ++ set_tsk_thread_flag(task, TIF_SIGPENDING); ++ break; ++ ++ case UTRACE_BLOCKSTEP: ++ if (likely(arch_has_block_step())) { ++ user_enable_block_step(task); ++ break; ++ } ++ ++ /* ++ * This means some callback is to blame for failing ++ * to check arch_has_block_step() itself. Warn and ++ * then fall through to treat it as SINGLESTEP. ++ */ ++ WARN(1, "UTRACE_BLOCKSTEP when !arch_has_block_step()"); ++ ++ case UTRACE_SINGLESTEP: ++ if (likely(arch_has_single_step())) { ++ user_enable_single_step(task); ++ } else { ++ /* ++ * This means some callback is to blame for failing ++ * to check arch_has_single_step() itself. Spew ++ * about it so the loser will fix his module. ++ */ ++ WARN(1, ++ "UTRACE_SINGLESTEP when !arch_has_single_step()"); ++ } ++ break; ++ ++ case UTRACE_REPORT: ++ case UTRACE_RESUME: ++ default: ++ user_disable_single_step(task); ++ break; ++ } ++} ++ ++/* ++ * This is called when TIF_NOTIFY_RESUME had been set (and is now clear). ++ * We are close to user mode, and this is the place to report or stop. ++ * When we return, we're going to user mode or into the signals code. ++ */ ++void utrace_resume(struct task_struct *task, struct pt_regs *regs) ++{ ++ struct utrace *utrace = task_utrace_struct(task); ++ INIT_REPORT(report); ++ struct utrace_engine *engine; ++ ++ /* ++ * Some machines get here with interrupts disabled. The same arch ++ * code path leads to calling into get_signal_to_deliver(), which ++ * implicitly reenables them by virtue of spin_unlock_irq. 
++ */ ++ local_irq_enable(); ++ ++ /* ++ * If this flag is still set it's because there was a signal ++ * handler setup done but no report_signal following it. Clear ++ * the flag before we get to user so it doesn't confuse us later. ++ */ ++ if (unlikely(utrace->signal_handler)) { ++ spin_lock(&utrace->lock); ++ utrace->signal_handler = 0; ++ spin_unlock(&utrace->lock); ++ } ++ ++ /* ++ * Update our bookkeeping even if there are no callbacks made here. ++ */ ++ report.action = start_report(utrace); ++ ++ switch (report.action) { ++ case UTRACE_RESUME: ++ /* ++ * Anything we might have done was already handled by ++ * utrace_get_signal(), or this is an entirely spurious ++ * call. (The arch might use TIF_NOTIFY_RESUME for other ++ * purposes as well as calling us.) ++ */ ++ return; ++ case UTRACE_REPORT: ++ if (unlikely(!(task->utrace_flags & UTRACE_EVENT(QUIESCE)))) ++ break; ++ /* ++ * Do a simple reporting pass, with no specific ++ * callback after report_quiesce. ++ */ ++ report.action = UTRACE_RESUME; ++ list_for_each_entry(engine, &utrace->attached, entry) ++ start_callback(utrace, &report, engine, task, 0); ++ break; ++ default: ++ /* ++ * Even if this report was truly spurious, there is no need ++ * for utrace_reset() now. TIF_NOTIFY_RESUME was already ++ * cleared--it doesn't stay spuriously set. ++ */ ++ report.spurious = false; ++ break; ++ } ++ ++ /* ++ * Finish the report and either stop or get ready to resume. ++ * If utrace->resume was not UTRACE_REPORT, this applies its ++ * effect now (i.e. step or interrupt). ++ */ ++ finish_resume_report(task, utrace, &report); ++} ++ ++/* ++ * Return true if current has forced signal_pending(). ++ * ++ * This is called only when current->utrace_flags is nonzero, so we know ++ * that current->utrace must be set. It's not inlined in tracehook.h ++ * just so that struct utrace can stay opaque outside this file. 
++ */ ++bool utrace_interrupt_pending(void) ++{ ++ return task_utrace_struct(current)->resume == UTRACE_INTERRUPT; ++} ++ ++/* ++ * Take the siglock and push @info back on our queue. ++ * Returns with @task->sighand->siglock held. ++ */ ++static void push_back_signal(struct task_struct *task, siginfo_t *info) ++ __acquires(task->sighand->siglock) ++{ ++ struct sigqueue *q; ++ ++ if (unlikely(!info->si_signo)) { /* Oh, a wise guy! */ ++ spin_lock_irq(&task->sighand->siglock); ++ return; ++ } ++ ++ q = sigqueue_alloc(); ++ if (likely(q)) { ++ q->flags = 0; ++ copy_siginfo(&q->info, info); ++ } ++ ++ spin_lock_irq(&task->sighand->siglock); ++ ++ sigaddset(&task->pending.signal, info->si_signo); ++ if (likely(q)) ++ list_add(&q->list, &task->pending.list); ++ ++ set_tsk_thread_flag(task, TIF_SIGPENDING); ++} ++ ++/* ++ * This is the hook from the signals code, called with the siglock held. ++ * Here is the ideal place to stop. We also dequeue and intercept signals. ++ */ ++int utrace_get_signal(struct task_struct *task, struct pt_regs *regs, ++ siginfo_t *info, struct k_sigaction *return_ka) ++ __releases(task->sighand->siglock) ++ __acquires(task->sighand->siglock) ++{ ++ struct utrace *utrace; ++ struct k_sigaction *ka; ++ INIT_REPORT(report); ++ struct utrace_engine *engine; ++ const struct utrace_engine_ops *ops; ++ unsigned long event, want; ++ u32 ret; ++ int signr; ++ ++ utrace = task_utrace_struct(task); ++ if (utrace->resume < UTRACE_RESUME || ++ utrace->pending_attach || utrace->signal_handler) { ++ enum utrace_resume_action resume; ++ ++ /* ++ * We've been asked for an explicit report before we ++ * even check for pending signals. ++ */ ++ ++ spin_unlock_irq(&task->sighand->siglock); ++ ++ spin_lock(&utrace->lock); ++ ++ splice_attaching(utrace); ++ ++ report.result = utrace->signal_handler ? 
++ UTRACE_SIGNAL_HANDLER : UTRACE_SIGNAL_REPORT; ++ utrace->signal_handler = 0; ++ ++ resume = utrace->resume; ++ utrace->resume = UTRACE_RESUME; ++ ++ spin_unlock(&utrace->lock); ++ ++ /* ++ * Make sure signal_pending() only returns true ++ * if there are real signals pending. ++ */ ++ if (signal_pending(task)) { ++ spin_lock_irq(&task->sighand->siglock); ++ recalc_sigpending(); ++ spin_unlock_irq(&task->sighand->siglock); ++ } ++ ++ if (resume > UTRACE_REPORT) { ++ /* ++ * We only got here to process utrace->resume. ++ * Despite no callbacks, this report is not spurious. ++ */ ++ report.action = resume; ++ report.spurious = false; ++ finish_resume_report(task, utrace, &report); ++ return -1; ++ } else if (!(task->utrace_flags & UTRACE_EVENT(QUIESCE))) { ++ /* ++ * We only got here to clear utrace->signal_handler. ++ */ ++ return -1; ++ } ++ ++ /* ++ * Do a reporting pass for no signal, just for EVENT(QUIESCE). ++ * The engine callbacks can fill in *info and *return_ka. ++ * We'll pass NULL for the @orig_ka argument to indicate ++ * that there was no original signal. ++ */ ++ event = 0; ++ ka = NULL; ++ memset(return_ka, 0, sizeof *return_ka); ++ } else if (!(task->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) || ++ unlikely(task->signal->group_stop_count)) { ++ /* ++ * If no engine is interested in intercepting signals or ++ * we must stop, let the caller just dequeue them normally ++ * or participate in group-stop. ++ */ ++ return 0; ++ } else { ++ /* ++ * Steal the next signal so we can let tracing engines ++ * examine it. From the signal number and sigaction, ++ * determine what normal delivery would do. If no ++ * engine perturbs it, we'll do that by returning the ++ * signal number after setting *return_ka. ++ */ ++ signr = dequeue_signal(task, &task->blocked, info); ++ if (signr == 0) ++ return signr; ++ BUG_ON(signr != info->si_signo); ++ ++ ka = &task->sighand->action[signr - 1]; ++ *return_ka = *ka; ++ ++ /* ++ * We are never allowed to interfere with SIGKILL. 
++ * Just punt after filling in *return_ka for our caller. ++ */ ++ if (signr == SIGKILL) ++ return signr; ++ ++ if (ka->sa.sa_handler == SIG_IGN) { ++ event = UTRACE_EVENT(SIGNAL_IGN); ++ report.result = UTRACE_SIGNAL_IGN; ++ } else if (ka->sa.sa_handler != SIG_DFL) { ++ event = UTRACE_EVENT(SIGNAL); ++ report.result = UTRACE_SIGNAL_DELIVER; ++ } else if (sig_kernel_coredump(signr)) { ++ event = UTRACE_EVENT(SIGNAL_CORE); ++ report.result = UTRACE_SIGNAL_CORE; ++ } else if (sig_kernel_ignore(signr)) { ++ event = UTRACE_EVENT(SIGNAL_IGN); ++ report.result = UTRACE_SIGNAL_IGN; ++ } else if (signr == SIGSTOP) { ++ event = UTRACE_EVENT(SIGNAL_STOP); ++ report.result = UTRACE_SIGNAL_STOP; ++ } else if (sig_kernel_stop(signr)) { ++ event = UTRACE_EVENT(SIGNAL_STOP); ++ report.result = UTRACE_SIGNAL_TSTP; ++ } else { ++ event = UTRACE_EVENT(SIGNAL_TERM); ++ report.result = UTRACE_SIGNAL_TERM; ++ } ++ ++ /* ++ * Now that we know what event type this signal is, we ++ * can short-circuit if no engines care about those. ++ */ ++ if ((task->utrace_flags & (event | UTRACE_EVENT(QUIESCE))) == 0) ++ return signr; ++ ++ /* ++ * We have some interested engines, so tell them about ++ * the signal and let them change its disposition. ++ */ ++ spin_unlock_irq(&task->sighand->siglock); ++ } ++ ++ /* ++ * This reporting pass chooses what signal disposition we'll act on. ++ */ ++ list_for_each_entry(engine, &utrace->attached, entry) { ++ /* ++ * See start_callback() comment about this barrier. ++ */ ++ utrace->reporting = engine; ++ smp_mb(); ++ ++ /* ++ * This pairs with the barrier in mark_engine_detached(), ++ * see start_callback() comments. 
++ */ ++ want = engine->flags; ++ smp_rmb(); ++ ops = engine->ops; ++ ++ if ((want & (event | UTRACE_EVENT(QUIESCE))) == 0) { ++ utrace->reporting = NULL; ++ continue; ++ } ++ ++ if (ops->report_signal) ++ ret = (*ops->report_signal)( ++ report.result | report.action, engine, ++ regs, info, ka, return_ka); ++ else ++ ret = (report.result | (*ops->report_quiesce)( ++ report.action, engine, event)); ++ ++ /* ++ * Avoid a tight loop reporting again and again if some ++ * engine is too stupid. ++ */ ++ switch (utrace_resume_action(ret)) { ++ default: ++ break; ++ case UTRACE_INTERRUPT: ++ case UTRACE_REPORT: ++ ret = (ret & ~UTRACE_RESUME_MASK) | UTRACE_RESUME; ++ break; ++ } ++ ++ finish_callback(task, utrace, &report, engine, ret); ++ } ++ ++ /* ++ * We express the chosen action to the signals code in terms ++ * of a representative signal whose default action does it. ++ * Our caller uses our return value (signr) to decide what to ++ * do, but uses info->si_signo as the signal number to report. ++ */ ++ switch (utrace_signal_action(report.result)) { ++ case UTRACE_SIGNAL_TERM: ++ signr = SIGTERM; ++ break; ++ ++ case UTRACE_SIGNAL_CORE: ++ signr = SIGQUIT; ++ break; ++ ++ case UTRACE_SIGNAL_STOP: ++ signr = SIGSTOP; ++ break; ++ ++ case UTRACE_SIGNAL_TSTP: ++ signr = SIGTSTP; ++ break; ++ ++ case UTRACE_SIGNAL_DELIVER: ++ signr = info->si_signo; ++ ++ if (return_ka->sa.sa_handler == SIG_DFL) { ++ /* ++ * We'll do signr's normal default action. ++ * For ignore, we'll fall through below. ++ * For stop/death, break locks and returns it. ++ */ ++ if (likely(signr) && !sig_kernel_ignore(signr)) ++ break; ++ } else if (return_ka->sa.sa_handler != SIG_IGN && ++ likely(signr)) { ++ /* ++ * Complete the bookkeeping after the report. ++ * The handler will run. If an engine wanted to ++ * stop or step, then make sure we do another ++ * report after signal handler setup. 
++ */ ++ if (report.action != UTRACE_RESUME) ++ report.action = UTRACE_INTERRUPT; ++ finish_report(task, utrace, &report, true); ++ ++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) ++ push_back_signal(task, info); ++ else ++ spin_lock_irq(&task->sighand->siglock); ++ ++ /* ++ * We do the SA_ONESHOT work here since the ++ * normal path will only touch *return_ka now. ++ */ ++ if (unlikely(return_ka->sa.sa_flags & SA_ONESHOT)) { ++ return_ka->sa.sa_flags &= ~SA_ONESHOT; ++ if (likely(valid_signal(signr))) { ++ ka = &task->sighand->action[signr - 1]; ++ ka->sa.sa_handler = SIG_DFL; ++ } ++ } ++ ++ return signr; ++ } ++ ++ /* Fall through for an ignored signal. */ ++ ++ case UTRACE_SIGNAL_IGN: ++ case UTRACE_SIGNAL_REPORT: ++ default: ++ /* ++ * If the signal is being ignored, then we are on the way ++ * directly back to user mode. We can stop here, or step, ++ * as in utrace_resume(), above. After we've dealt with that, ++ * our caller will relock and come back through here. ++ */ ++ finish_resume_report(task, utrace, &report); ++ ++ if (unlikely(fatal_signal_pending(task))) { ++ /* ++ * The only reason we woke up now was because of a ++ * SIGKILL. Don't do normal dequeuing in case it ++ * might get a signal other than SIGKILL. That would ++ * perturb the death state so it might differ from ++ * what the debugger would have allowed to happen. ++ * Instead, pluck out just the SIGKILL to be sure ++ * we'll die immediately with nothing else different ++ * from the quiescent state the debugger wanted us in. 
++ */ ++ sigset_t sigkill_only; ++ siginitsetinv(&sigkill_only, sigmask(SIGKILL)); ++ spin_lock_irq(&task->sighand->siglock); ++ signr = dequeue_signal(task, &sigkill_only, info); ++ BUG_ON(signr != SIGKILL); ++ *return_ka = task->sighand->action[SIGKILL - 1]; ++ return signr; ++ } ++ ++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) { ++ push_back_signal(task, info); ++ spin_unlock_irq(&task->sighand->siglock); ++ } ++ ++ return -1; ++ } ++ ++ /* ++ * Complete the bookkeeping after the report. ++ * This sets utrace->resume if UTRACE_STOP was used. ++ */ ++ finish_report(task, utrace, &report, true); ++ ++ return_ka->sa.sa_handler = SIG_DFL; ++ ++ /* ++ * If this signal is fatal, si_signo gets through as exit_code. ++ * We can't allow a completely bogus value there or else core ++ * kernel code can freak out. (If an engine wants to control ++ * the exit_code value exactly, it can do so in report_exit.) ++ * We'll produce a big complaint in dmesg, but won't crash. ++ * That's nicer for debugging your utrace engine. ++ */ ++ if (unlikely(info->si_signo & 0x80)) { ++ WARN(1, "utrace engine left bogus si_signo value!"); ++ info->si_signo = SIGTRAP; ++ } ++ ++ if (unlikely(report.result & UTRACE_SIGNAL_HOLD)) ++ push_back_signal(task, info); ++ else ++ spin_lock_irq(&task->sighand->siglock); ++ ++ if (sig_kernel_stop(signr)) ++ task->signal->flags |= SIGNAL_STOP_DEQUEUED; ++ ++ return signr; ++} ++ ++/* ++ * This gets called after a signal handler has been set up. ++ * We set a flag so the next report knows it happened. ++ * If we're already stepping, make sure we do a report_signal. ++ * If not, make sure we get into utrace_resume() where we can ++ * clear the signal_handler flag before resuming. 
++ */ ++void utrace_signal_handler(struct task_struct *task, int stepping) ++{ ++ struct utrace *utrace = task_utrace_struct(task); ++ ++ spin_lock(&utrace->lock); ++ ++ utrace->signal_handler = 1; ++ if (utrace->resume > UTRACE_INTERRUPT) { ++ if (stepping) { ++ utrace->resume = UTRACE_INTERRUPT; ++ set_tsk_thread_flag(task, TIF_SIGPENDING); ++ } else if (utrace->resume == UTRACE_RESUME) { ++ set_tsk_thread_flag(task, TIF_NOTIFY_RESUME); ++ } ++ } ++ ++ spin_unlock(&utrace->lock); ++} ++ ++/** ++ * utrace_prepare_examine - prepare to examine thread state ++ * @target: thread of interest, a &struct task_struct pointer ++ * @engine: engine pointer returned by utrace_attach_task() ++ * @exam: temporary state, a &struct utrace_examiner pointer ++ * ++ * This call prepares to safely examine the thread @target using ++ * &struct user_regset calls, or direct access to thread-synchronous fields. ++ * ++ * When @target is current, this call is superfluous. When @target is ++ * another thread, it must be held stopped via %UTRACE_STOP by @engine. ++ * ++ * This call may block the caller until @target stays stopped, so it must ++ * be called only after the caller is sure @target is about to unschedule. ++ * This means a zero return from a utrace_control() call on @engine giving ++ * %UTRACE_STOP, or a report_quiesce() or report_signal() callback to ++ * @engine that used %UTRACE_STOP in its return value. ++ * ++ * Returns -%ESRCH if @target is dead or -%EINVAL if %UTRACE_STOP was ++ * not used. If @target has started running again despite %UTRACE_STOP ++ * (for %SIGKILL or a spurious wakeup), this call returns -%EAGAIN. ++ * ++ * When this call returns zero, it's safe to use &struct user_regset ++ * calls and task_user_regset_view() on @target and to examine some of ++ * its fields directly. When the examination is complete, a ++ * utrace_finish_examine() call must follow to check whether it was ++ * completed safely. 
++ */ ++int utrace_prepare_examine(struct task_struct *target, ++ struct utrace_engine *engine, ++ struct utrace_examiner *exam) ++{ ++ int ret = 0; ++ ++ if (unlikely(target == current)) ++ return 0; ++ ++ rcu_read_lock(); ++ if (unlikely(!engine_wants_stop(engine))) ++ ret = -EINVAL; ++ else if (unlikely(target->exit_state)) ++ ret = -ESRCH; ++ else { ++ exam->state = target->state; ++ if (unlikely(exam->state == TASK_RUNNING)) ++ ret = -EAGAIN; ++ else ++ get_task_struct(target); ++ } ++ rcu_read_unlock(); ++ ++ if (likely(!ret)) { ++ exam->ncsw = wait_task_inactive(target, exam->state); ++ put_task_struct(target); ++ if (unlikely(!exam->ncsw)) ++ ret = -EAGAIN; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_prepare_examine); ++ ++/** ++ * utrace_finish_examine - complete an examination of thread state ++ * @target: thread of interest, a &struct task_struct pointer ++ * @engine: engine pointer returned by utrace_attach_task() ++ * @exam: pointer passed to utrace_prepare_examine() call ++ * ++ * This call completes an examination on the thread @target begun by a ++ * paired utrace_prepare_examine() call with the same arguments that ++ * returned success (zero). ++ * ++ * When @target is current, this call is superfluous. When @target is ++ * another thread, this returns zero if @target has remained unscheduled ++ * since the paired utrace_prepare_examine() call returned zero. ++ * ++ * When this returns an error, any examination done since the paired ++ * utrace_prepare_examine() call is unreliable and the data extracted ++ * should be discarded. The error is -%EINVAL if @engine is not ++ * keeping @target stopped, or -%EAGAIN if @target woke up unexpectedly. 
++ */ ++int utrace_finish_examine(struct task_struct *target, ++ struct utrace_engine *engine, ++ struct utrace_examiner *exam) ++{ ++ int ret = 0; ++ ++ if (unlikely(target == current)) ++ return 0; ++ ++ rcu_read_lock(); ++ if (unlikely(!engine_wants_stop(engine))) ++ ret = -EINVAL; ++ else if (unlikely(target->state != exam->state)) ++ ret = -EAGAIN; ++ else ++ get_task_struct(target); ++ rcu_read_unlock(); ++ ++ if (likely(!ret)) { ++ unsigned long ncsw = wait_task_inactive(target, exam->state); ++ if (unlikely(ncsw != exam->ncsw)) ++ ret = -EAGAIN; ++ put_task_struct(target); ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(utrace_finish_examine); ++ ++/* ++ * This is declared in linux/regset.h and defined in machine-dependent ++ * code. We put the export here to ensure no machine forgets it. ++ */ ++EXPORT_SYMBOL_GPL(task_user_regset_view); ++ ++/* ++ * Called with rcu_read_lock() held. ++ */ ++void task_utrace_proc_status(struct seq_file *m, struct task_struct *p) ++{ ++ seq_printf(m, "Utrace:\t%lx\n", p->utrace_flags); ++} diff --git a/linux-2.6-v4l-dvb-add-kworld-a340-support.patch b/linux-2.6-v4l-dvb-add-kworld-a340-support.patch new file mode 100644 index 000000000..0c7d24117 --- /dev/null +++ b/linux-2.6-v4l-dvb-add-kworld-a340-support.patch @@ -0,0 +1,161 @@ +From c34c78838f02693a70808e38309629e85aa50266 Mon Sep 17 00:00:00 2001 +From: Jarod Wilson +Date: Thu, 20 May 2010 10:03:13 -0400 +Subject: [PATCH] dvb: add support for kworld 340u and ub435-q to em28xx-dvb + +This adds support for the KWorld PlusTV 340U and KWorld UB345-Q ATSC +sticks, which are really the same device. The sticks have an eMPIA +em2870 usb bridge chipset, an LG Electronics LGDT3304 ATSC/QAM +demodulator and an NXP TDA18271HD tuner -- early versions of the 340U +have a a TDA18271HD/C1, later models and the UB435-Q have a C2. + +The stick has been tested succesfully with both VSB_8 and QAM_256 signals. 
+Its using lgdt3304 support added to the lgdt3305 driver by a prior patch, +rather than the current lgdt3304 driver, as its severely lacking in +functionality by comparison (see said patch for details). + +Signed-off-by: Jarod Wilson +--- + drivers/media/video/em28xx/em28xx-cards.c | 28 ++++++++++++++++++++++++ + drivers/media/video/em28xx/em28xx-dvb.c | 33 +++++++++++++++++++++++++++++ + drivers/media/video/em28xx/em28xx.h | 1 + + 3 files changed, 62 insertions(+), 0 deletions(-) + +diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c +index b0fb083..6312e76 100644 +--- a/drivers/media/video/em28xx/em28xx-cards.c ++++ b/drivers/media/video/em28xx/em28xx-cards.c +@@ -158,6 +158,22 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { + { -1, -1, -1, -1}, + }; + ++/* ++ * KWorld PlusTV 340U and UB435-Q (ATSC) GPIOs map: ++ * EM_GPIO_0 - currently unknown ++ * EM_GPIO_1 - LED disable/enable (1 = off, 0 = on) ++ * EM_GPIO_2 - currently unknown ++ * EM_GPIO_3 - currently unknown ++ * EM_GPIO_4 - TDA18271HD/C1 tuner (1 = active, 0 = in reset) ++ * EM_GPIO_5 - LGDT3304 ATSC/QAM demod (1 = active, 0 = in reset) ++ * EM_GPIO_6 - currently unknown ++ * EM_GPIO_7 - currently unknown ++ */ ++static struct em28xx_reg_seq kworld_a340_digital[] = { ++ {EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10}, ++ { -1, -1, -1, -1}, ++}; ++ + /* Pinnacle Hybrid Pro eb1a:2881 */ + static struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = { + {EM28XX_R08_GPIO, 0xfd, ~EM_GPIO_4, 10}, +@@ -1649,6 +1665,16 @@ struct em28xx_board em28xx_boards[] = { + .tuner_gpio = reddo_dvb_c_usb_box, + .has_dvb = 1, + }, ++ /* 1b80:a340 - Empia EM2870, NXP TDA18271HD and LG DT3304, sold ++ * initially as the KWorld PlusTV 340U, then as the UB435-Q. 
++ * Early variants have a TDA18271HD/C1, later ones a TDA18271HD/C2 */ ++ [EM2870_BOARD_KWORLD_A340] = { ++ .name = "KWorld PlusTV 340U or UB435-Q (ATSC)", ++ .tuner_type = TUNER_ABSENT, /* Digital-only TDA18271HD */ ++ .has_dvb = 1, ++ .dvb_gpio = kworld_a340_digital, ++ .tuner_gpio = default_tuner_gpio, ++ }, + }; + const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); + +@@ -1768,6 +1794,8 @@ struct usb_device_id em28xx_id_table[] = { + .driver_info = EM2820_BOARD_IODATA_GVMVP_SZ }, + { USB_DEVICE(0xeb1a, 0x50a6), + .driver_info = EM2860_BOARD_GADMEI_UTV330 }, ++ { USB_DEVICE(0x1b80, 0xa340), ++ .driver_info = EM2870_BOARD_KWORLD_A340 }, + { }, + }; + MODULE_DEVICE_TABLE(usb, em28xx_id_table); +diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c +index bcd3c37..ce8a9ee 100644 +--- a/drivers/media/video/em28xx/em28xx-dvb.c ++++ b/drivers/media/video/em28xx/em28xx-dvb.c +@@ -30,11 +30,13 @@ + #include "tuner-simple.h" + + #include "lgdt330x.h" ++#include "lgdt3305.h" + #include "zl10353.h" + #include "s5h1409.h" + #include "mt352.h" + #include "mt352_priv.h" /* FIXME */ + #include "tda1002x.h" ++#include "tda18271.h" + + MODULE_DESCRIPTION("driver for em28xx based DVB cards"); + MODULE_AUTHOR("Mauro Carvalho Chehab "); +@@ -231,6 +233,18 @@ static struct lgdt330x_config em2880_lgdt3303_dev = { + .demod_chip = LGDT3303, + }; + ++static struct lgdt3305_config em2870_lgdt3304_dev = { ++ .i2c_addr = 0x0e, ++ .demod_chip = LGDT3304, ++ .spectral_inversion = 1, ++ .deny_i2c_rptr = 1, ++ .mpeg_mode = LGDT3305_MPEG_PARALLEL, ++ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE, ++ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH, ++ .vsb_if_khz = 3250, ++ .qam_if_khz = 4000, ++}; ++ + static struct zl10353_config em28xx_zl10353_with_xc3028 = { + .demod_address = (0x1e >> 1), + .no_tuner = 1, +@@ -247,6 +261,17 @@ static struct s5h1409_config em28xx_s5h1409_with_xc3028 = { + .mpeg_timing = 
S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK + }; + ++static struct tda18271_std_map kworld_a340_std_map = { ++ .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 0, ++ .if_lvl = 1, .rfagc_top = 0x37, }, ++ .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 1, ++ .if_lvl = 1, .rfagc_top = 0x37, }, ++}; ++ ++static struct tda18271_config kworld_a340_config = { ++ .std_map = &kworld_a340_std_map, ++}; ++ + static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = { + .demod_address = (0x1e >> 1), + .no_tuner = 1, +@@ -570,6 +595,14 @@ static int dvb_init(struct em28xx *dev) + } + } + break; ++ case EM2870_BOARD_KWORLD_A340: ++ dvb->frontend = dvb_attach(lgdt3305_attach, ++ &em2870_lgdt3304_dev, ++ &dev->i2c_adap); ++ if (dvb->frontend != NULL) ++ dvb_attach(tda18271_attach, dvb->frontend, 0x60, ++ &dev->i2c_adap, &kworld_a340_config); ++ break; + default: + em28xx_errdev("/2: The frontend of your DVB/ATSC card" + " isn't supported yet\n"); +diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h +index ba6fe5d..9f3fdad 100644 +--- a/drivers/media/video/em28xx/em28xx.h ++++ b/drivers/media/video/em28xx/em28xx.h +@@ -112,6 +112,7 @@ + #define EM2870_BOARD_REDDO_DVB_C_USB_BOX 73 + #define EM2800_BOARD_VC211A 74 + #define EM2882_BOARD_DIKOM_DK300 75 ++#define EM2870_BOARD_KWORLD_A340 76 + + /* Limits minimum and default number of buffers */ + #define EM28XX_MIN_BUF 4 +-- +1.7.0.1 + diff --git a/linux-2.6-v4l-dvb-add-lgdt3304-support.patch b/linux-2.6-v4l-dvb-add-lgdt3304-support.patch new file mode 100644 index 000000000..30c50434f --- /dev/null +++ b/linux-2.6-v4l-dvb-add-lgdt3304-support.patch @@ -0,0 +1,350 @@ +From b71e18093e2e7f240797875c50c49552722f8825 Mon Sep 17 00:00:00 2001 +From: Jarod Wilson +Date: Mon, 15 Feb 2010 17:13:25 -0500 +Subject: [PATCH 1/2] dvb: add lgdt3304 support to lgdt3305 driver + +There's a currently-unused lgdt3304 demod driver, which leaves a lot to +be desired as far as functionality. 
The 3304 is unsurprisingly quite +similar to the 3305, and empirical testing yeilds far better results +and more complete functionality by merging 3304 support into the 3305 +driver. (For example, the current lgdt3304 driver lacks support for +signal strength, snr, ucblocks, etc., which we get w/the lgdt3305). + +For the moment, not dropping the lgdt3304 driver, and its still up to +a given device's config setup to choose which demod driver to use, but +I'd suggest dropping the 3304 driver entirely. + +As a follow-up to this patch, I've got another patch that adds support +for the KWorld PlusTV 340U (ATSC) em2870-based tuner stick, driving +its lgdt3304 demod via this lgdt3305 driver, which is what I used to +successfully test this patch with both VSB_8 and QAM_256 signals. + +A few pieces are still a touch crude, but I think its a solid start, +as well as much cleaner and more feature-complete than the existing +lgdt3304 driver. + +Signed-off-by: Jarod Wilson +--- + drivers/media/dvb/frontends/lgdt3305.c | 206 ++++++++++++++++++++++++++++++-- + drivers/media/dvb/frontends/lgdt3305.h | 6 + + 2 files changed, 203 insertions(+), 9 deletions(-) + +diff --git a/drivers/media/dvb/frontends/lgdt3305.c b/drivers/media/dvb/frontends/lgdt3305.c +index fde8c59..40695e6 100644 +--- a/drivers/media/dvb/frontends/lgdt3305.c ++++ b/drivers/media/dvb/frontends/lgdt3305.c +@@ -1,5 +1,5 @@ + /* +- * Support for LGDT3305 - VSB/QAM ++ * Support for LG Electronics LGDT3304 and LGDT3305 - VSB/QAM + * + * Copyright (C) 2008, 2009 Michael Krufky + * +@@ -357,7 +357,10 @@ static int lgdt3305_rfagc_loop(struct lgdt3305_state *state, + case QAM_256: + agcdelay = 0x046b; + rfbw = 0x8889; +- ifbw = 0x8888; ++ if (state->cfg->demod_chip == LGDT3305) ++ ifbw = 0x8888; ++ else ++ ifbw = 0x6666; + break; + default: + return -EINVAL; +@@ -409,8 +412,18 @@ static int lgdt3305_agc_setup(struct lgdt3305_state *state, + lg_dbg("lockdten = %d, acqen = %d\n", lockdten, acqen); + + /* control agc 
function */ +- lgdt3305_write_reg(state, LGDT3305_AGC_CTRL_4, 0xe1 | lockdten << 1); +- lgdt3305_set_reg_bit(state, LGDT3305_AGC_CTRL_1, 2, acqen); ++ switch (state->cfg->demod_chip) { ++ case LGDT3304: ++ lgdt3305_write_reg(state, 0x0314, 0xe1 | lockdten << 1); ++ lgdt3305_set_reg_bit(state, 0x030e, 2, acqen); ++ break; ++ case LGDT3305: ++ lgdt3305_write_reg(state, LGDT3305_AGC_CTRL_4, 0xe1 | lockdten << 1); ++ lgdt3305_set_reg_bit(state, LGDT3305_AGC_CTRL_1, 2, acqen); ++ break; ++ default: ++ return -EINVAL; ++ } + + return lgdt3305_rfagc_loop(state, param); + } +@@ -543,6 +556,11 @@ static int lgdt3305_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) + enable ? 0 : 1); + } + ++static int lgdt3304_sleep(struct dvb_frontend *fe) ++{ ++ return 0; ++} ++ + static int lgdt3305_sleep(struct dvb_frontend *fe) + { + struct lgdt3305_state *state = fe->demodulator_priv; +@@ -571,6 +589,55 @@ static int lgdt3305_sleep(struct dvb_frontend *fe) + return 0; + } + ++static int lgdt3304_init(struct dvb_frontend *fe) ++{ ++ struct lgdt3305_state *state = fe->demodulator_priv; ++ int ret; ++ ++ static struct lgdt3305_reg lgdt3304_init_data[] = { ++ { .reg = LGDT3305_GEN_CTRL_1, .val = 0x03, }, ++ { .reg = 0x000d, .val = 0x02, }, ++ { .reg = 0x000e, .val = 0x02, }, ++ { .reg = LGDT3305_DGTL_AGC_REF_1, .val = 0x32, }, ++ { .reg = LGDT3305_DGTL_AGC_REF_2, .val = 0xc4, }, ++ { .reg = LGDT3305_CR_CTR_FREQ_1, .val = 0x00, }, ++ { .reg = LGDT3305_CR_CTR_FREQ_2, .val = 0x00, }, ++ { .reg = LGDT3305_CR_CTR_FREQ_3, .val = 0x00, }, ++ { .reg = LGDT3305_CR_CTR_FREQ_4, .val = 0x00, }, ++ { .reg = LGDT3305_CR_CTRL_7, .val = 0xf9, }, ++ { .reg = 0x0112, .val = 0x17, }, ++ { .reg = 0x0113, .val = 0x15, }, ++ { .reg = 0x0114, .val = 0x18, }, ++ { .reg = 0x0115, .val = 0xff, }, ++ { .reg = 0x0116, .val = 0x3c, }, ++ { .reg = 0x0214, .val = 0x67, }, ++ { .reg = 0x0424, .val = 0x8d, }, ++ { .reg = 0x0427, .val = 0x12, }, ++ { .reg = 0x0428, .val = 0x4f, }, ++ { .reg = LGDT3305_IFBW_1, .val = 
0x80, }, ++ { .reg = LGDT3305_IFBW_2, .val = 0x00, }, ++ { .reg = 0x030a, .val = 0x08, }, ++ { .reg = 0x030b, .val = 0x9b, }, ++ { .reg = 0x030d, .val = 0x00, }, ++ { .reg = 0x030e, .val = 0x1c, }, ++ { .reg = 0x0314, .val = 0xe1, }, ++ { .reg = 0x000d, .val = 0x82, }, ++ { .reg = LGDT3305_TP_CTRL_1, .val = 0x5b, }, ++ { .reg = LGDT3305_TP_CTRL_1, .val = 0x5b, }, ++ }; ++ ++ lg_dbg("\n"); ++ ++ ret = lgdt3305_write_regs(state, lgdt3304_init_data, ++ ARRAY_SIZE(lgdt3304_init_data)); ++ if (lg_fail(ret)) ++ goto fail; ++ ++ ret = lgdt3305_soft_reset(state); ++fail: ++ return ret; ++} ++ + static int lgdt3305_init(struct dvb_frontend *fe) + { + struct lgdt3305_state *state = fe->demodulator_priv; +@@ -639,6 +706,88 @@ fail: + return ret; + } + ++static int lgdt3304_set_parameters(struct dvb_frontend *fe, ++ struct dvb_frontend_parameters *param) ++{ ++ struct lgdt3305_state *state = fe->demodulator_priv; ++ int ret; ++ ++ lg_dbg("(%d, %d)\n", param->frequency, param->u.vsb.modulation); ++ ++ if (fe->ops.tuner_ops.set_params) { ++ ret = fe->ops.tuner_ops.set_params(fe, param); ++ if (fe->ops.i2c_gate_ctrl) ++ fe->ops.i2c_gate_ctrl(fe, 0); ++ if (lg_fail(ret)) ++ goto fail; ++ state->current_frequency = param->frequency; ++ } ++ ++ ret = lgdt3305_set_modulation(state, param); ++ if (lg_fail(ret)) ++ goto fail; ++ ++ ret = lgdt3305_passband_digital_agc(state, param); ++ if (lg_fail(ret)) ++ goto fail; ++ ++ ret = lgdt3305_agc_setup(state, param); ++ if (lg_fail(ret)) ++ goto fail; ++ ++ /* reg 0x030d is 3304-only... seen in vsb and qam usbsnoops... 
*/ ++ switch (param->u.vsb.modulation) { ++ case VSB_8: ++ lgdt3305_write_reg(state, 0x030d, 0x00); ++#if 1 ++ lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_1, 0x4f); ++ lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_2, 0x0c); ++ lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_3, 0xac); ++ lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_4, 0xba); ++#endif ++ break; ++ case QAM_64: ++ case QAM_256: ++ lgdt3305_write_reg(state, 0x030d, 0x14); ++#if 1 ++ ret = lgdt3305_set_if(state, param); ++ if (lg_fail(ret)) ++ goto fail; ++#endif ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++#if 0 ++ /* the set_if vsb formula doesn't work for the 3304, we end up sending ++ * 0x40851e07 instead of 0x4f0cacba (which works back to 94050, rather ++ * than 3250, in the case of the kworld 340u) */ ++ ret = lgdt3305_set_if(state, param); ++ if (lg_fail(ret)) ++ goto fail; ++#endif ++ ++ ret = lgdt3305_spectral_inversion(state, param, ++ state->cfg->spectral_inversion ++ ? 1 : 0); ++ if (lg_fail(ret)) ++ goto fail; ++ ++ state->current_modulation = param->u.vsb.modulation; ++ ++ ret = lgdt3305_mpeg_mode(state, state->cfg->mpeg_mode); ++ if (lg_fail(ret)) ++ goto fail; ++ ++ /* lgdt3305_mpeg_mode_polarity calls lgdt3305_soft_reset */ ++ ret = lgdt3305_mpeg_mode_polarity(state, ++ state->cfg->tpclk_edge, ++ state->cfg->tpvalid_polarity); ++fail: ++ return ret; ++} ++ + static int lgdt3305_set_parameters(struct dvb_frontend *fe, + struct dvb_frontend_parameters *param) + { +@@ -847,6 +996,10 @@ static int lgdt3305_read_status(struct dvb_frontend *fe, fe_status_t *status) + switch (state->current_modulation) { + case QAM_256: + case QAM_64: ++#if 0 /* needed w/3304 to set FE_HAS_SIGNAL */ ++ if (cr_lock) ++ *status |= FE_HAS_SIGNAL; ++#endif + ret = lgdt3305_read_fec_lock_status(state, &fec_lock); + if (lg_fail(ret)) + goto fail; +@@ -992,6 +1145,7 @@ static void lgdt3305_release(struct dvb_frontend *fe) + kfree(state); + } + ++static struct dvb_frontend_ops lgdt3304_ops; + static struct 
dvb_frontend_ops lgdt3305_ops; + + struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config, +@@ -1012,11 +1166,21 @@ struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config, + state->cfg = config; + state->i2c_adap = i2c_adap; + +- memcpy(&state->frontend.ops, &lgdt3305_ops, +- sizeof(struct dvb_frontend_ops)); ++ switch (config->demod_chip) { ++ case LGDT3304: ++ memcpy(&state->frontend.ops, &lgdt3304_ops, ++ sizeof(struct dvb_frontend_ops)); ++ break; ++ case LGDT3305: ++ memcpy(&state->frontend.ops, &lgdt3305_ops, ++ sizeof(struct dvb_frontend_ops)); ++ break; ++ default: ++ goto fail; ++ } + state->frontend.demodulator_priv = state; + +- /* verify that we're talking to a lg dt3305 */ ++ /* verify that we're talking to a lg dt3304/5 */ + ret = lgdt3305_read_reg(state, LGDT3305_GEN_CTRL_2, &val); + if ((lg_fail(ret)) | (val == 0)) + goto fail; +@@ -1035,12 +1199,36 @@ struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config, + + return &state->frontend; + fail: +- lg_warn("unable to detect LGDT3305 hardware\n"); ++ lg_warn("unable to detect %s hardware\n", ++ config->demod_chip ? 
"LGDT3304" : "LGDT3305"); + kfree(state); + return NULL; + } + EXPORT_SYMBOL(lgdt3305_attach); + ++static struct dvb_frontend_ops lgdt3304_ops = { ++ .info = { ++ .name = "LG Electronics LGDT3304 VSB/QAM Frontend", ++ .type = FE_ATSC, ++ .frequency_min = 54000000, ++ .frequency_max = 858000000, ++ .frequency_stepsize = 62500, ++ .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB ++ }, ++ .i2c_gate_ctrl = lgdt3305_i2c_gate_ctrl, ++ .init = lgdt3304_init, ++ .sleep = lgdt3304_sleep, ++ .set_frontend = lgdt3304_set_parameters, ++ .get_frontend = lgdt3305_get_frontend, ++ .get_tune_settings = lgdt3305_get_tune_settings, ++ .read_status = lgdt3305_read_status, ++ .read_ber = lgdt3305_read_ber, ++ .read_signal_strength = lgdt3305_read_signal_strength, ++ .read_snr = lgdt3305_read_snr, ++ .read_ucblocks = lgdt3305_read_ucblocks, ++ .release = lgdt3305_release, ++}; ++ + static struct dvb_frontend_ops lgdt3305_ops = { + .info = { + .name = "LG Electronics LGDT3305 VSB/QAM Frontend", +@@ -1064,7 +1252,7 @@ static struct dvb_frontend_ops lgdt3305_ops = { + .release = lgdt3305_release, + }; + +-MODULE_DESCRIPTION("LG Electronics LGDT3305 ATSC/QAM-B Demodulator Driver"); ++MODULE_DESCRIPTION("LG Electronics LGDT3304/5 ATSC/QAM-B Demodulator Driver"); + MODULE_AUTHOR("Michael Krufky "); + MODULE_LICENSE("GPL"); + MODULE_VERSION("0.1"); +diff --git a/drivers/media/dvb/frontends/lgdt3305.h b/drivers/media/dvb/frontends/lgdt3305.h +index 9cb11c9..a7f30c2 100644 +--- a/drivers/media/dvb/frontends/lgdt3305.h ++++ b/drivers/media/dvb/frontends/lgdt3305.h +@@ -41,6 +41,11 @@ enum lgdt3305_tp_valid_polarity { + LGDT3305_TP_VALID_HIGH = 1, + }; + ++enum lgdt_demod_chip_type { ++ LGDT3305 = 0, ++ LGDT3304 = 1, ++}; ++ + struct lgdt3305_config { + u8 i2c_addr; + +@@ -65,6 +70,7 @@ struct lgdt3305_config { + enum lgdt3305_mpeg_mode mpeg_mode; + enum lgdt3305_tp_clock_edge tpclk_edge; + enum lgdt3305_tp_valid_polarity tpvalid_polarity; ++ enum lgdt_demod_chip_type demod_chip; + }; + + #if 
defined(CONFIG_DVB_LGDT3305) || (defined(CONFIG_DVB_LGDT3305_MODULE) && \ +-- +1.6.6 + diff --git a/linux-2.6-v4l-dvb-experimental.patch b/linux-2.6-v4l-dvb-experimental.patch new file mode 100644 index 000000000..e69de29bb diff --git a/linux-2.6-v4l-dvb-fixes.patch b/linux-2.6-v4l-dvb-fixes.patch new file mode 100644 index 000000000..e69de29bb diff --git a/linux-2.6-v4l-dvb-gspca-fixes.patch b/linux-2.6-v4l-dvb-gspca-fixes.patch new file mode 100644 index 000000000..dbf1b56ff --- /dev/null +++ b/linux-2.6-v4l-dvb-gspca-fixes.patch @@ -0,0 +1,3849 @@ +From: German Galkin +Date: Sun, 7 Mar 2010 09:19:02 +0000 (-0300) +Subject: V4L/DVB: gspca - sn9c20x: Fix exposure control for HV7131R sensor +X-Git-Url: http://git.linuxtv.org/v4l-dvb.git?a=commitdiff_plain;h=0c05cee69fbef74a08f4f9804f0a3ca7ba39a3e2 + +V4L/DVB: gspca - sn9c20x: Fix exposure control for HV7131R sensor + +Make the range of exposure values (0-0x1770) distribute evenly through +HV7131R's exposure control bytes. + +Signed-off-by: German Galkin +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c +index 4a1bc08..dce5ef8 100644 +--- a/drivers/media/video/gspca/sn9c20x.c ++++ b/drivers/media/video/gspca/sn9c20x.c +@@ -1654,9 +1654,9 @@ static int set_exposure(struct gspca_dev *gspca_dev) + case SENSOR_HV7131R: + exp[0] |= (4 << 4); + exp[2] = 0x25; +- exp[3] = ((sd->exposure * 0xffffff) / 0xffff) >> 16; +- exp[4] = ((sd->exposure * 0xffffff) / 0xffff) >> 8; +- exp[5] = ((sd->exposure * 0xffffff) / 0xffff) & 0xff; ++ exp[3] = (sd->exposure >> 5) & 0xff; ++ exp[4] = (sd->exposure << 3) & 0xff; ++ exp[5] = 0; + break; + default: + return 0; +From 1f6d33db111022345a85d8f441a22a5a46fc8ca2 Mon Sep 17 00:00:00 2001 +From: Brian Johnson +Date: Tue, 9 Mar 2010 19:53:05 -0500 +Subject: [PATCH] gspca - sn9c20x: use gspca's input device handling + +Drop custom code for handling the input button in +favor 
of using gspca's input hanlding mechinism. +--- + drivers/media/video/gspca/Kconfig | 6 -- + drivers/media/video/gspca/sn9c20x.c | 145 ++++++++--------------------------- + 2 files changed, 33 insertions(+), 118 deletions(-) + +diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig +index e0060c1..5d920e5 100644 +--- a/drivers/media/video/gspca/Kconfig ++++ b/drivers/media/video/gspca/Kconfig +@@ -172,12 +172,6 @@ config USB_GSPCA_SN9C20X + To compile this driver as a module, choose M here: the + module will be called gspca_sn9c20x. + +-config USB_GSPCA_SN9C20X_EVDEV +- bool "Enable evdev support" +- depends on USB_GSPCA_SN9C20X && INPUT +- ---help--- +- Say Y here in order to enable evdev support for sn9c20x webcam button. +- + config USB_GSPCA_SONIXB + tristate "SONIX Bayer USB Camera Driver" + depends on VIDEO_V4L2 && USB_GSPCA +diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c +index dce5ef8..d2a4902 100644 +--- a/drivers/media/video/gspca/sn9c20x.c ++++ b/drivers/media/video/gspca/sn9c20x.c +@@ -18,10 +18,7 @@ + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +-#include +-#include +-#include ++#ifdef CONFIG_INPUT + #include + #include + #endif +@@ -54,6 +51,9 @@ MODULE_LICENSE("GPL"); + #define SENSOR_HV7131R 10 + #define SENSOR_MT9VPRB 20 + ++/* camera flags */ ++#define HAS_BUTTON 0x1 ++ + /* specific webcam descriptor */ + struct sd { + struct gspca_dev gspca_dev; +@@ -87,11 +87,7 @@ struct sd { + u8 *jpeg_hdr; + u8 quality; + +-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +- struct input_dev *input_dev; +- u8 input_gpio; +- struct task_struct *input_task; +-#endif ++ u8 flags; + }; + + struct i2c_reg_u8 { +@@ -1420,87 +1416,6 @@ static int hv7131r_init_sensor(struct gspca_dev *gspca_dev) + return 0; + } + +-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +-static int input_kthread(void *data) +-{ +- struct gspca_dev *gspca_dev = (struct 
gspca_dev *)data; +- struct sd *sd = (struct sd *) gspca_dev; +- +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait); +- set_freezable(); +- for (;;) { +- if (kthread_should_stop()) +- break; +- +- if (reg_r(gspca_dev, 0x1005, 1) < 0) +- continue; +- +- input_report_key(sd->input_dev, +- KEY_CAMERA, +- gspca_dev->usb_buf[0] & sd->input_gpio); +- input_sync(sd->input_dev); +- +- wait_event_freezable_timeout(wait, +- kthread_should_stop(), +- msecs_to_jiffies(100)); +- } +- return 0; +-} +- +- +-static int sn9c20x_input_init(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- if (sd->input_gpio == 0) +- return 0; +- +- sd->input_dev = input_allocate_device(); +- if (!sd->input_dev) +- return -ENOMEM; +- +- sd->input_dev->name = "SN9C20X Webcam"; +- +- sd->input_dev->phys = kasprintf(GFP_KERNEL, "usb-%s-%s", +- gspca_dev->dev->bus->bus_name, +- gspca_dev->dev->devpath); +- +- if (!sd->input_dev->phys) +- return -ENOMEM; +- +- usb_to_input_id(gspca_dev->dev, &sd->input_dev->id); +- sd->input_dev->dev.parent = &gspca_dev->dev->dev; +- +- set_bit(EV_KEY, sd->input_dev->evbit); +- set_bit(KEY_CAMERA, sd->input_dev->keybit); +- +- if (input_register_device(sd->input_dev)) +- return -EINVAL; +- +- sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%s-%s", +- gspca_dev->dev->bus->bus_name, +- gspca_dev->dev->devpath); +- +- if (IS_ERR(sd->input_task)) +- return -EINVAL; +- +- return 0; +-} +- +-static void sn9c20x_input_cleanup(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- if (sd->input_task != NULL && !IS_ERR(sd->input_task)) +- kthread_stop(sd->input_task); +- +- if (sd->input_dev != NULL) { +- input_unregister_device(sd->input_dev); +- kfree(sd->input_dev->phys); +- input_free_device(sd->input_dev); +- sd->input_dev = NULL; +- } +-} +-#endif +- + static int set_cmatrix(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -2004,6 +1919,7 @@ static int sd_config(struct gspca_dev 
*gspca_dev, + + sd->sensor = (id->driver_info >> 8) & 0xff; + sd->i2c_addr = id->driver_info & 0xff; ++ sd->flags = (id->driver_info >> 16) & 0xff; + + switch (sd->sensor) { + case SENSOR_MT9M111: +@@ -2038,11 +1954,6 @@ static int sd_config(struct gspca_dev *gspca_dev, + + sd->quality = 95; + +-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +- sd->input_gpio = (id->driver_info >> 16) & 0xff; +- if (sn9c20x_input_init(gspca_dev) < 0) +- return -ENODEV; +-#endif + return 0; + } + +@@ -2342,6 +2253,24 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev) + do_autoexposure(gspca_dev, avg_lum); + } + ++#ifdef CONFIG_INPUT ++static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* interrupt packet */ ++ int len) /* interrupt packet length */ ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret = -EINVAL; ++ if (sd->flags & HAS_BUTTON && len == 1) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ ret = 0; ++ } ++ return ret; ++} ++#endif ++ + static void sd_pkt_scan(struct gspca_dev *gspca_dev, + u8 *data, /* isoc packet */ + int len) /* iso packet length */ +@@ -2408,6 +2337,9 @@ static const struct sd_desc sd_desc = { + .stopN = sd_stopN, + .stop0 = sd_stop0, + .pkt_scan = sd_pkt_scan, ++#ifdef CONFIG_INPUT ++ .int_pkt_scan = sd_int_pkt_scan, ++#endif + .dq_callback = sd_dqcallback, + #ifdef CONFIG_VIDEO_ADV_DEBUG + .set_register = sd_dbg_s_register, +@@ -2416,8 +2348,8 @@ static const struct sd_desc sd_desc = { + .get_chip_ident = sd_chip_ident, + }; + +-#define SN9C20X(sensor, i2c_addr, button_mask) \ +- .driver_info = (button_mask << 16) \ ++#define SN9C20X(sensor, i2c_addr, flags) \ ++ .driver_info = (flags << 16) \ + | (SENSOR_ ## sensor << 8) \ + | (i2c_addr) + +@@ -2425,7 +2357,7 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 
0)}, + {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, +- {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, 0x10)}, ++ {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, HAS_BUTTON)}, + {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)}, +@@ -2436,13 +2368,13 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0c45, 0x627f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)}, +- {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)}, ++ {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, HAS_BUTTON)}, + {USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x62b0), SN9C20X(MT9VPRB, 0x00, 0)}, + {USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, 0)}, +- {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, 0)}, ++ {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, HAS_BUTTON)}, + {USB_DEVICE(0x0c45, 0x62bc), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)}, +@@ -2451,7 +2383,7 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0618), SN9C20X(HV7131R, 0x11, 0)}, +- {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, 0)}, ++ {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, HAS_BUTTON)}, + {USB_DEVICE(0xa168, 0x0615), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0xa168, 0x0617), SN9C20X(MT9M111, 0x5d, 0)}, + {} +@@ -2466,22 +2398,11 @@ static int sd_probe(struct usb_interface 
*intf, + THIS_MODULE); + } + +-static void sd_disconnect(struct usb_interface *intf) +-{ +-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +- struct gspca_dev *gspca_dev = usb_get_intfdata(intf); +- +- sn9c20x_input_cleanup(gspca_dev); +-#endif +- +- gspca_disconnect(intf); +-} +- + static struct usb_driver sd_driver = { + .name = MODULE_NAME, + .id_table = device_table, + .probe = sd_probe, +- .disconnect = sd_disconnect, ++ .disconnect = gspca_disconnect, + #ifdef CONFIG_PM + .suspend = gspca_suspend, + .resume = gspca_resume, +-- +1.6.1 + +From ed53494aab1eadefa6b6c1413d59805839fb6fcf Mon Sep 17 00:00:00 2001 +From: Brian Johnson +Date: Tue, 9 Mar 2010 19:53:12 -0500 +Subject: [PATCH] gspca - sn9c20x: Add support for camera LEDs + +--- + drivers/media/video/gspca/sn9c20x.c | 18 +++++++++++++++--- + 1 files changed, 15 insertions(+), 3 deletions(-) + +diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c +index d2a4902..0e8a64d 100644 +--- a/drivers/media/video/gspca/sn9c20x.c ++++ b/drivers/media/video/gspca/sn9c20x.c +@@ -53,6 +53,7 @@ MODULE_LICENSE("GPL"); + + /* camera flags */ + #define HAS_BUTTON 0x1 ++#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */ + + /* specific webcam descriptor */ + struct sd { +@@ -730,7 +731,8 @@ static u16 bridge_init[][2] = { + {0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f}, + {0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f}, + {0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01}, +- {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80} ++ {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80}, ++ {0x1007, 0x00} + }; + + /* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */ +@@ -1973,6 +1975,11 @@ static int sd_init(struct gspca_dev *gspca_dev) + } + } + ++ if (sd->flags & LED_REVERSE) ++ reg_w1(gspca_dev, 0x1006, 0x00); ++ else ++ reg_w1(gspca_dev, 0x1006, 0x20); ++ + if (reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) { + err("Device initialization failed"); + return -ENODEV; +@@ -2153,6 
+2160,8 @@ static int sd_start(struct gspca_dev *gspca_dev) + set_exposure(gspca_dev); + set_hvflip(gspca_dev); + ++ reg_w1(gspca_dev, 0x1007, 0x20); ++ + reg_r(gspca_dev, 0x1061, 1); + reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] | 0x02); + return 0; +@@ -2160,6 +2169,8 @@ static int sd_start(struct gspca_dev *gspca_dev) + + static void sd_stopN(struct gspca_dev *gspca_dev) + { ++ reg_w1(gspca_dev, 0x1007, 0x00); ++ + reg_r(gspca_dev, 0x1061, 1); + reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] & ~0x02); + } +@@ -2349,7 +2360,7 @@ static const struct sd_desc sd_desc = { + }; + + #define SN9C20X(sensor, i2c_addr, flags) \ +- .driver_info = (flags << 16) \ ++ .driver_info = ((flags & 0xff) << 16) \ + | (SENSOR_ ## sensor << 8) \ + | (i2c_addr) + +@@ -2357,7 +2368,8 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, +- {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, HAS_BUTTON)}, ++ {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, ++ (HAS_BUTTON | LED_REVERSE))}, + {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)}, +-- +1.6.1 + +From c1dfdd5443097d4fbd773858a1c38d7b6b7dfcc1 Mon Sep 17 00:00:00 2001 +From: Brian Johnson +Date: Tue, 9 Mar 2010 19:55:51 -0500 +Subject: [PATCH] gspca - sn9c20x: Add upside down detection + +Add support for detecting webcams that are mounted +upside down in laptops. Currently the only two known +are two MSI modesl using the 0c45:624f. 
+--- + drivers/media/video/gspca/sn9c20x.c | 50 ++++++++++++++++++++++++++++------- + 1 files changed, 40 insertions(+), 10 deletions(-) + +diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c +index 0e8a64d..41c8916 100644 +--- a/drivers/media/video/gspca/sn9c20x.c ++++ b/drivers/media/video/gspca/sn9c20x.c +@@ -26,6 +26,7 @@ + #include "jpeg.h" + + #include ++#include + + MODULE_AUTHOR("Brian Johnson , " + "microdia project "); +@@ -54,6 +55,7 @@ MODULE_LICENSE("GPL"); + /* camera flags */ + #define HAS_BUTTON 0x1 + #define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */ ++#define FLIP_DETECT 0x4 + + /* specific webcam descriptor */ + struct sd { +@@ -126,6 +128,25 @@ static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val); + static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val); + static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val); + ++static const struct dmi_system_id flip_dmi_table[] = { ++ { ++ .ident = "MSI MS-1034", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1034"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "0341") ++ } ++ }, ++ { ++ .ident = "MSI MS-1632", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), ++ DMI_MATCH(DMI_BOARD_NAME, "MS-1632") ++ } ++ }, ++ {} ++}; ++ + static const struct ctrl sd_ctrls[] = { + { + #define BRIGHTNESS_IDX 0 +@@ -1495,17 +1516,26 @@ static int set_redblue(struct gspca_dev *gspca_dev) + + static int set_hvflip(struct gspca_dev *gspca_dev) + { +- u8 value, tslb; ++ u8 value, tslb, hflip, vflip; + u16 value2; + struct sd *sd = (struct sd *) gspca_dev; ++ ++ if ((sd->flags & FLIP_DETECT) && dmi_check_system(flip_dmi_table)) { ++ hflip = !sd->hflip; ++ vflip = !sd->vflip; ++ } else { ++ hflip = sd->hflip; ++ vflip = sd->vflip; ++ } ++ + switch (sd->sensor) { + case SENSOR_OV9650: + i2c_r1(gspca_dev, 0x1e, &value); + value &= ~0x30; + tslb = 0x01; +- if (sd->hflip) ++ if (hflip) 
+ value |= 0x20; +- if (sd->vflip) { ++ if (vflip) { + value |= 0x10; + tslb = 0x49; + } +@@ -1516,9 +1546,9 @@ static int set_hvflip(struct gspca_dev *gspca_dev) + case SENSOR_MT9V011: + i2c_r2(gspca_dev, 0x20, &value2); + value2 &= ~0xc0a0; +- if (sd->hflip) ++ if (hflip) + value2 |= 0x8080; +- if (sd->vflip) ++ if (vflip) + value2 |= 0x4020; + i2c_w2(gspca_dev, 0x20, value2); + break; +@@ -1526,18 +1556,18 @@ static int set_hvflip(struct gspca_dev *gspca_dev) + case SENSOR_MT9V112: + i2c_r2(gspca_dev, 0x20, &value2); + value2 &= ~0x0003; +- if (sd->hflip) ++ if (hflip) + value2 |= 0x0002; +- if (sd->vflip) ++ if (vflip) + value2 |= 0x0001; + i2c_w2(gspca_dev, 0x20, value2); + break; + case SENSOR_HV7131R: + i2c_r1(gspca_dev, 0x01, &value); + value &= ~0x03; +- if (sd->vflip) ++ if (vflip) + value |= 0x01; +- if (sd->hflip) ++ if (hflip) + value |= 0x02; + i2c_w1(gspca_dev, 0x01, value); + break; +@@ -2370,7 +2400,7 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, + (HAS_BUTTON | LED_REVERSE))}, +- {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)}, ++ {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, FLIP_DETECT)}, + {USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)}, +-- +1.6.1 + +From 2e25ca5a85c702d2d49df7f5b1e3ecd61e01bf3e Mon Sep 17 00:00:00 2001 +From: Brian Johnson +Date: Tue, 9 Mar 2010 19:55:59 -0500 +Subject: [PATCH] gspca - sn9c20x: Add support for cameras using the MT9M112 sensor + +Adds the following models: + - 0c45:624c + - 0c45:628c + - 0458:704a + - 0458:704c +--- + Documentation/video4linux/gspca.txt | 4 +++ + drivers/media/video/gspca/sn9c20x.c | 44 ++++++++++++++++++++++++++++++++-- + 2 files changed, 45 insertions(+), 3 deletions(-) + +diff --git a/Documentation/video4linux/gspca.txt 
b/Documentation/video4linux/gspca.txt +index 181b9e6..3bfab2e 100644 +--- a/Documentation/video4linux/gspca.txt ++++ b/Documentation/video4linux/gspca.txt +@@ -50,6 +50,8 @@ zc3xx 0458:700f Genius VideoCam Web V2 + sonixj 0458:7025 Genius Eye 311Q + sn9c20x 0458:7029 Genius Look 320s + sonixj 0458:702e Genius Slim 310 NB ++sn9c20x 0458:704a Genius Slim 1320 ++sn9c20x 0458:704c Genius i-Look 1321 + sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650) + sonixj 045e:00f5 MicroSoft VX3000 + sonixj 045e:00f7 MicroSoft VX1000 +@@ -311,6 +313,7 @@ sonixj 0c45:614a Frontech E-Ccam (JIL-2225) + sn9c20x 0c45:6240 PC Camera (SN9C201 + MT9M001) + sn9c20x 0c45:6242 PC Camera (SN9C201 + MT9M111) + sn9c20x 0c45:6248 PC Camera (SN9C201 + OV9655) ++sn9c20x 0c45:624c PC Camera (SN9C201 + MT9M112) + sn9c20x 0c45:624e PC Camera (SN9C201 + SOI968) + sn9c20x 0c45:624f PC Camera (SN9C201 + OV9650) + sn9c20x 0c45:6251 PC Camera (SN9C201 + OV9650) +@@ -323,6 +326,7 @@ sn9c20x 0c45:627f PC Camera (SN9C201 + OV9650) + sn9c20x 0c45:6280 PC Camera (SN9C202 + MT9M001) + sn9c20x 0c45:6282 PC Camera (SN9C202 + MT9M111) + sn9c20x 0c45:6288 PC Camera (SN9C202 + OV9655) ++sn9c20x 0c45:628c PC Camera (SN9C201 + MT9M112) + sn9c20x 0c45:628e PC Camera (SN9C202 + SOI968) + sn9c20x 0c45:628f PC Camera (SN9C202 + OV9650) + sn9c20x 0c45:62a0 PC Camera (SN9C202 + OV7670) +diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c +index 41c8916..8684596 100644 +--- a/drivers/media/video/gspca/sn9c20x.c ++++ b/drivers/media/video/gspca/sn9c20x.c +@@ -49,7 +49,8 @@ MODULE_LICENSE("GPL"); + #define SENSOR_MT9V112 7 + #define SENSOR_MT9M001 8 + #define SENSOR_MT9M111 9 +-#define SENSOR_HV7131R 10 ++#define SENSOR_MT9M112 10 ++#define SENSOR_HV7131R 11 + #define SENSOR_MT9VPRB 20 + + /* camera flags */ +@@ -730,6 +731,7 @@ static u16 i2c_ident[] = { + V4L2_IDENT_MT9V112, + V4L2_IDENT_MT9M001C12ST, + V4L2_IDENT_MT9M111, ++ V4L2_IDENT_MT9M112, + V4L2_IDENT_HV7131R, + }; + +@@ -1061,6 
+1063,13 @@ static struct i2c_reg_u16 mt9m111_init[] = { + {0xf0, 0x0000}, + }; + ++static struct i2c_reg_u16 mt9m112_init[] = { ++ {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008}, ++ {0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300}, ++ {0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e}, ++ {0xf0, 0x0000}, ++}; ++ + static struct i2c_reg_u8 hv7131r_init[] = { + {0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08}, + {0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0}, +@@ -1387,6 +1396,23 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev) + return -ENODEV; + } + ++static int mt9m112_init_sensor(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int i; ++ for (i = 0; i < ARRAY_SIZE(mt9m112_init); i++) { ++ if (i2c_w2(gspca_dev, mt9m112_init[i].reg, ++ mt9m112_init[i].val) < 0) { ++ err("MT9M112 sensor initialization failed"); ++ return -ENODEV; ++ } ++ } ++ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX); ++ sd->hstart = 0; ++ sd->vstart = 2; ++ return 0; ++} ++ + static int mt9m111_init_sensor(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -1552,6 +1578,7 @@ static int set_hvflip(struct gspca_dev *gspca_dev) + value2 |= 0x4020; + i2c_w2(gspca_dev, 0x20, value2); + break; ++ case SENSOR_MT9M112: + case SENSOR_MT9M111: + case SENSOR_MT9V112: + i2c_r2(gspca_dev, 0x20, &value2); +@@ -1877,7 +1904,7 @@ static int sd_dbg_g_register(struct gspca_dev *gspca_dev, + if (reg->match.addr != sd->i2c_addr) + return -EINVAL; + if (sd->sensor >= SENSOR_MT9V011 && +- sd->sensor <= SENSOR_MT9M111) { ++ sd->sensor <= SENSOR_MT9M112) { + if (i2c_r2(gspca_dev, reg->reg, (u16 *)®->val) < 0) + return -EINVAL; + } else { +@@ -1906,7 +1933,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev, + if (reg->match.addr != sd->i2c_addr) + return -EINVAL; + if (sd->sensor >= SENSOR_MT9V011 && +- sd->sensor <= SENSOR_MT9M111) { ++ sd->sensor <= SENSOR_MT9M112) { + if (i2c_w2(gspca_dev, reg->reg, reg->val) < 0) + 
return -EINVAL; + } else { +@@ -1954,6 +1981,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + sd->flags = (id->driver_info >> 16) & 0xff; + + switch (sd->sensor) { ++ case SENSOR_MT9M112: + case SENSOR_MT9M111: + case SENSOR_OV9650: + case SENSOR_SOI968: +@@ -2050,6 +2078,11 @@ static int sd_init(struct gspca_dev *gspca_dev) + return -ENODEV; + info("MT9M111 sensor detected"); + break; ++ case SENSOR_MT9M112: ++ if (mt9m112_init_sensor(gspca_dev) < 0) ++ return -ENODEV; ++ info("MT9M112 sensor detected"); ++ break; + case SENSOR_MT9M001: + if (mt9m001_init_sensor(gspca_dev) < 0) + return -ENODEV; +@@ -2109,6 +2142,7 @@ static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode) + i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40); + } + break; ++ case SENSOR_MT9M112: + case SENSOR_MT9M111: + if (mode & MODE_SXGA) { + i2c_w2(gspca_dev, 0xf0, 0x0002); +@@ -2398,6 +2432,7 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, ++ {USB_DEVICE(0x0c45, 0x624c), SN9C20X(MT9M112, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, + (HAS_BUTTON | LED_REVERSE))}, + {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, FLIP_DETECT)}, +@@ -2411,6 +2446,7 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, HAS_BUTTON)}, ++ {USB_DEVICE(0x0c45, 0x628c), SN9C20X(MT9M112, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)}, +@@ -2421,6 +2457,8 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x045e, 0x00f4), 
SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)}, + {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)}, ++ {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)}, ++ {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)}, + {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)}, +-- +1.6.1 + +From 9a4ef004b953572b479c643f2314ea352b74d732 Mon Sep 17 00:00:00 2001 +From: Brian Johnson +Date: Tue, 9 Mar 2010 19:57:55 -0500 +Subject: [PATCH] gspca - sn9c20x: Fix bug with OV9655 code + +Fixed buggy init sequence for the OV9655 sensor. +Tested with a 0c45:6288 and 0c45:62b3. +--- + drivers/media/video/gspca/sn9c20x.c | 58 ++++++++++++++-------------------- + 1 files changed, 24 insertions(+), 34 deletions(-) + +diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c +index 8684596..69a3aa3 100644 +--- a/drivers/media/video/gspca/sn9c20x.c ++++ b/drivers/media/video/gspca/sn9c20x.c +@@ -934,40 +934,30 @@ static struct i2c_reg_u8 ov9650_init[] = { + }; + + static struct i2c_reg_u8 ov9655_init[] = { +- {0x12, 0x80}, {0x12, 0x01}, {0x0d, 0x00}, {0x0e, 0x61}, +- {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24}, +- {0x1e, 0x04}, {0x1e, 0x04}, {0x1e, 0x04}, {0x27, 0x08}, +- {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x32, 0xbf}, +- {0x34, 0x3d}, {0x35, 0x00}, {0x36, 0xf8}, {0x38, 0x12}, +- {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, +- {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, +- {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, +- {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, +- {0x4d, 0xdc}, {0x4e, 0xdc}, {0x69, 0x02}, {0x6c, 0x04}, +- {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02}, +- {0x8a, 0x23}, {0x8c, 0x0d}, {0x90, 0x7e}, {0x91, 0x7c}, +- {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60}, +- {0xa8, 0xc1}, {0xa9, 0xfa}, 
{0xaa, 0x92}, {0xab, 0x04}, +- {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80}, +- {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf}, +- {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b}, +- {0xbf, 0x3a}, {0xc0, 0xe2}, {0xc1, 0xc8}, {0xc2, 0x01}, ++ {0x12, 0x80}, {0x0e, 0x61}, {0x11, 0x80}, {0x13, 0xba}, ++ {0x14, 0x2e}, {0x16, 0x24}, {0x1e, 0x04}, {0x27, 0x08}, ++ {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x34, 0x3d}, ++ {0x35, 0x00}, {0x38, 0x12}, {0x0f, 0x42}, {0x39, 0x57}, ++ {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, {0x3d, 0x19}, ++ {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, {0x42, 0x80}, ++ {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, {0x48, 0x3c}, ++ {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, {0x4d, 0xdc}, ++ {0x4e, 0xdc}, {0x6c, 0x04}, {0x6f, 0x9e}, {0x70, 0x05}, ++ {0x71, 0x78}, {0x77, 0x02}, {0x8a, 0x23}, {0x90, 0x7e}, ++ {0x91, 0x7c}, {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, ++ {0xa6, 0x60}, {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, ++ {0xab, 0x04}, {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, ++ {0xaf, 0x80}, {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, ++ {0xb6, 0xaf}, {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, ++ {0xbe, 0x3b}, {0xbf, 0x3a}, {0xc1, 0xc8}, {0xc2, 0x01}, + {0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0}, +- {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x12, 0x61}, ++ {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x2d, 0x00}, ++ {0x2e, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x12, 0x61}, + {0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a}, +- {0x03, 0x12}, {0x17, 0x14}, {0x18, 0x00}, {0x19, 0x01}, +- {0x1a, 0x3d}, {0x32, 0xbf}, {0x11, 0x80}, {0x2a, 0x10}, +- {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0x1e, 0x04}, +- {0x1e, 0x04}, {0x10, 0x7c}, {0x04, 0x03}, {0xa1, 0x00}, +- {0x2d, 0x00}, {0x2e, 0x00}, {0x00, 0x00}, {0x01, 0x80}, +- {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d}, +- {0xc0, 0xaa}, {0x69, 0x0a}, {0x03, 0x12}, {0x17, 0x14}, +- {0x18, 0x00}, {0x19, 0x01}, {0x1a, 0x3d}, {0x32, 0xbf}, +- {0x11, 0x80}, {0x2a, 
0x10}, {0x2b, 0x0a}, {0x92, 0x00}, +- {0x93, 0x00}, {0x04, 0x01}, {0x10, 0x1f}, {0xa1, 0x00}, +- {0x00, 0x0a}, {0xa1, 0x00}, {0x10, 0x5d}, {0x04, 0x03}, +- {0x00, 0x01}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03}, +- {0x00, 0x03}, {0x00, 0x0a}, {0x00, 0x10}, {0x00, 0x13}, ++ {0x03, 0x09}, {0x17, 0x16}, {0x18, 0x6e}, {0x19, 0x01}, ++ {0x1a, 0x3e}, {0x32, 0x09}, {0x2a, 0x10}, {0x2b, 0x0a}, ++ {0x92, 0x00}, {0x93, 0x00}, {0xa1, 0x00}, {0x10, 0x7c}, ++ {0x04, 0x03}, {0x00, 0x13}, + }; + + static struct i2c_reg_u16 mt9v112_init[] = { +@@ -1267,8 +1257,8 @@ static int ov9655_init_sensor(struct gspca_dev *gspca_dev) + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); +- sd->hstart = 0; +- sd->vstart = 7; ++ sd->hstart = 1; ++ sd->vstart = 2; + return 0; + } + +-- +1.6.1 + +From: Max Thrun +Date: Sat, 27 Feb 2010 20:20:18 +0000 (-0300) +Subject: V4L/DVB: gspca- ov534: Remove ambiguous controls +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=c8e6b4f9f77d44edef3ef9ffd25957a204beb444 + +V4L/DVB: gspca- ov534: Remove ambiguous controls + +Remove Blue/Red Channel Target Value, they are meant for Black Level +Calibration but it is not completely clear how to use them. 
+ +Signed-off-by: Max Thrun +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 957e05e..2a0e8a3 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -60,8 +60,6 @@ struct sd { + u8 contrast; + u8 gain; + u8 exposure; +- u8 redblc; +- u8 blueblc; + u8 hue; + u8 autogain; + u8 awb; +@@ -76,10 +74,6 @@ static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); +-static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val); +-static int sd_getredblc(struct gspca_dev *gspca_dev, __s32 *val); +-static int sd_setblueblc(struct gspca_dev *gspca_dev, __s32 val); +-static int sd_getblueblc(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); +@@ -156,34 +150,6 @@ static const struct ctrl sd_ctrls[] = { + }, + { /* 4 */ + { +- .id = V4L2_CID_RED_BALANCE, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Red Balance", +- .minimum = 0, +- .maximum = 255, +- .step = 1, +-#define RED_BALANCE_DEF 128 +- .default_value = RED_BALANCE_DEF, +- }, +- .set = sd_setredblc, +- .get = sd_getredblc, +- }, +- { /* 5 */ +- { +- .id = V4L2_CID_BLUE_BALANCE, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Blue Balance", +- .minimum = 0, +- .maximum = 255, +- .step = 1, +-#define BLUE_BALANCE_DEF 128 +- .default_value = BLUE_BALANCE_DEF, +- }, +- .set = sd_setblueblc, +- .get = sd_getblueblc, +- }, +- { /* 6 */ +- { + .id = V4L2_CID_HUE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Hue", +@@ -196,7 +162,7 @@ 
static const struct ctrl sd_ctrls[] = { + .set = sd_sethue, + .get = sd_gethue, + }, +- { /* 7 */ ++ { /* 5 */ + { + .id = V4L2_CID_AUTOGAIN, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -210,8 +176,8 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setautogain, + .get = sd_getautogain, + }, +-#define AWB_IDX 8 +- { /* 8 */ ++#define AWB_IDX 6 ++ { /* 6 */ + { + .id = V4L2_CID_AUTO_WHITE_BALANCE, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -225,7 +191,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setawb, + .get = sd_getawb, + }, +- { /* 9 */ ++ { /* 7 */ + { + .id = V4L2_CID_SHARPNESS, + .type = V4L2_CTRL_TYPE_INTEGER, +@@ -239,7 +205,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setsharpness, + .get = sd_getsharpness, + }, +- { /* 10 */ ++ { /* 8 */ + { + .id = V4L2_CID_HFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -253,7 +219,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_sethflip, + .get = sd_gethflip, + }, +- { /* 11 */ ++ { /* 9 */ + { + .id = V4L2_CID_VFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -722,20 +688,6 @@ static void setexposure(struct gspca_dev *gspca_dev) + sccb_reg_write(gspca_dev, 0x10, val << 1); + } + +-static void setredblc(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sccb_reg_write(gspca_dev, 0x43, sd->redblc); +-} +- +-static void setblueblc(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sccb_reg_write(gspca_dev, 0x42, sd->blueblc); +-} +- + static void sethue(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -825,8 +777,6 @@ static int sd_config(struct gspca_dev *gspca_dev, + sd->contrast = CONTRAST_DEF; + sd->gain = GAIN_DEF; + sd->exposure = EXPO_DEF; +- sd->redblc = RED_BALANCE_DEF; +- sd->blueblc = BLUE_BALANCE_DEF; + sd->hue = HUE_DEF; + #if AUTOGAIN_DEF != 0 + sd->autogain = AUTOGAIN_DEF; +@@ -907,8 +857,6 @@ static int sd_start(struct gspca_dev *gspca_dev) + setautogain(gspca_dev); + setawb(gspca_dev); + 
setgain(gspca_dev); +- setredblc(gspca_dev); +- setblueblc(gspca_dev); + sethue(gspca_dev); + setexposure(gspca_dev); + setbrightness(gspca_dev); +@@ -1092,42 +1040,6 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + +-static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sd->redblc = val; +- if (gspca_dev->streaming) +- setredblc(gspca_dev); +- return 0; +-} +- +-static int sd_getredblc(struct gspca_dev *gspca_dev, __s32 *val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- *val = sd->redblc; +- return 0; +-} +- +-static int sd_setblueblc(struct gspca_dev *gspca_dev, __s32 val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sd->blueblc = val; +- if (gspca_dev->streaming) +- setblueblc(gspca_dev); +- return 0; +-} +- +-static int sd_getblueblc(struct gspca_dev *gspca_dev, __s32 *val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- *val = sd->blueblc; +- return 0; +-} +- + static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +From: Antonio Ospite +Date: Sat, 27 Feb 2010 20:20:19 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Remove hue control +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=6acd5f4ec11256cc11f8068f17121087765b5c7f + +V4L/DVB: gspca - ov534: Remove hue control + +Hue control doesn't work and the sensor datasheet is not clear about how +to set hue properly. 
+ +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 2a0e8a3..186827a 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -60,7 +60,6 @@ struct sd { + u8 contrast; + u8 gain; + u8 exposure; +- u8 hue; + u8 autogain; + u8 awb; + s8 sharpness; +@@ -82,8 +81,6 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val); + static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); +-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val); +-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); +@@ -150,20 +147,6 @@ static const struct ctrl sd_ctrls[] = { + }, + { /* 4 */ + { +- .id = V4L2_CID_HUE, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Hue", +- .minimum = 0, +- .maximum = 255, +- .step = 1, +-#define HUE_DEF 143 +- .default_value = HUE_DEF, +- }, +- .set = sd_sethue, +- .get = sd_gethue, +- }, +- { /* 5 */ +- { + .id = V4L2_CID_AUTOGAIN, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Autogain", +@@ -176,8 +159,8 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setautogain, + .get = sd_getautogain, + }, +-#define AWB_IDX 6 +- { /* 6 */ ++#define AWB_IDX 5 ++ { /* 5 */ + { + .id = V4L2_CID_AUTO_WHITE_BALANCE, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -191,7 +174,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setawb, + .get = sd_getawb, + }, +- { /* 7 */ ++ { /* 6 */ + { + .id = V4L2_CID_SHARPNESS, + .type = V4L2_CTRL_TYPE_INTEGER, +@@ -205,7 +188,7 @@ static const struct ctrl sd_ctrls[] = { + .set = 
sd_setsharpness, + .get = sd_getsharpness, + }, +- { /* 8 */ ++ { /* 7 */ + { + .id = V4L2_CID_HFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -219,7 +202,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_sethflip, + .get = sd_gethflip, + }, +- { /* 9 */ ++ { /* 8 */ + { + .id = V4L2_CID_VFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -688,13 +671,6 @@ static void setexposure(struct gspca_dev *gspca_dev) + sccb_reg_write(gspca_dev, 0x10, val << 1); + } + +-static void sethue(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sccb_reg_write(gspca_dev, 0x01, sd->hue); +-} +- + static void setautogain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -777,7 +753,6 @@ static int sd_config(struct gspca_dev *gspca_dev, + sd->contrast = CONTRAST_DEF; + sd->gain = GAIN_DEF; + sd->exposure = EXPO_DEF; +- sd->hue = HUE_DEF; + #if AUTOGAIN_DEF != 0 + sd->autogain = AUTOGAIN_DEF; + #else +@@ -857,7 +832,6 @@ static int sd_start(struct gspca_dev *gspca_dev) + setautogain(gspca_dev); + setawb(gspca_dev); + setgain(gspca_dev); +- sethue(gspca_dev); + setexposure(gspca_dev); + setbrightness(gspca_dev); + setcontrast(gspca_dev); +@@ -1040,24 +1014,6 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + +-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sd->hue = val; +- if (gspca_dev->streaming) +- sethue(gspca_dev); +- return 0; +-} +- +-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- *val = sd->hue; +- return 0; +-} +- + static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +From: Max Thrun +Date: Sat, 27 Feb 2010 20:20:20 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Fix autogain control, enable it by default +X-Git-Url: 
http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=0de1d89b747863562f1fdf7501b8e1fb5d54b33e + +V4L/DVB: gspca - ov534: Fix autogain control, enable it by default + +* Use 'agc' instead of 'autogain' in the code so to align the naming + as in AEC/AWB. + * Tweak brightness and contrast default values. + * Fix setting/resetting registers values for AGC. + * Set actual gain back when disabling AGC. + * Skip setting GAIN register when AGC is enabled. + * Enable AGC by default. + +Note that as Auto Gain Control is now enabled by default, if you are +using the driver for visual computing applications you might need to +disable it explicitly in your software. + +Signed-off-by: Max Thrun +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 186827a..4058e22 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -60,7 +60,7 @@ struct sd { + u8 contrast; + u8 gain; + u8 exposure; +- u8 autogain; ++ u8 agc; + u8 awb; + s8 sharpness; + u8 hflip; +@@ -73,8 +73,8 @@ static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); +-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); +-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getagc(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val); +@@ -97,7 +97,7 @@ static const struct ctrl sd_ctrls[] = { + .minimum = 0, + .maximum = 
255, + .step = 1, +-#define BRIGHTNESS_DEF 20 ++#define BRIGHTNESS_DEF 0 + .default_value = BRIGHTNESS_DEF, + }, + .set = sd_setbrightness, +@@ -111,7 +111,7 @@ static const struct ctrl sd_ctrls[] = { + .minimum = 0, + .maximum = 255, + .step = 1, +-#define CONTRAST_DEF 37 ++#define CONTRAST_DEF 32 + .default_value = CONTRAST_DEF, + }, + .set = sd_setcontrast, +@@ -149,15 +149,15 @@ static const struct ctrl sd_ctrls[] = { + { + .id = V4L2_CID_AUTOGAIN, + .type = V4L2_CTRL_TYPE_BOOLEAN, +- .name = "Autogain", ++ .name = "Auto Gain", + .minimum = 0, + .maximum = 1, + .step = 1, +-#define AUTOGAIN_DEF 0 +- .default_value = AUTOGAIN_DEF, ++#define AGC_DEF 1 ++ .default_value = AGC_DEF, + }, +- .set = sd_setautogain, +- .get = sd_getautogain, ++ .set = sd_setagc, ++ .get = sd_getagc, + }, + #define AWB_IDX 5 + { /* 5 */ +@@ -639,6 +639,9 @@ static void setgain(struct gspca_dev *gspca_dev) + struct sd *sd = (struct sd *) gspca_dev; + u8 val; + ++ if (sd->agc) ++ return; ++ + val = sd->gain; + switch (val & 0x30) { + case 0x00: +@@ -671,18 +674,22 @@ static void setexposure(struct gspca_dev *gspca_dev) + sccb_reg_write(gspca_dev, 0x10, val << 1); + } + +-static void setautogain(struct gspca_dev *gspca_dev) ++static void setagc(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + +- if (sd->autogain) { +- sccb_reg_write(gspca_dev, 0x13, 0xf7); /* AGC,AEC,AWB ON */ ++ if (sd->agc) { ++ sccb_reg_write(gspca_dev, 0x13, ++ sccb_reg_read(gspca_dev, 0x13) | 0x04); + sccb_reg_write(gspca_dev, 0x64, + sccb_reg_read(gspca_dev, 0x64) | 0x03); + } else { +- sccb_reg_write(gspca_dev, 0x13, 0xf0); /* AGC,AEC,AWB OFF */ ++ sccb_reg_write(gspca_dev, 0x13, ++ sccb_reg_read(gspca_dev, 0x13) & ~0x04); + sccb_reg_write(gspca_dev, 0x64, +- sccb_reg_read(gspca_dev, 0x64) & 0xfc); ++ sccb_reg_read(gspca_dev, 0x64) & ~0x03); ++ ++ setgain(gspca_dev); + } + } + +@@ -753,8 +760,8 @@ static int sd_config(struct gspca_dev *gspca_dev, + sd->contrast = CONTRAST_DEF; + sd->gain 
= GAIN_DEF; + sd->exposure = EXPO_DEF; +-#if AUTOGAIN_DEF != 0 +- sd->autogain = AUTOGAIN_DEF; ++#if AGC_DEF != 0 ++ sd->agc = AGC_DEF; + #else + gspca_dev->ctrl_inac |= (1 << AWB_IDX); + #endif +@@ -829,7 +836,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + } + set_frame_rate(gspca_dev); + +- setautogain(gspca_dev); ++ setagc(gspca_dev); + setawb(gspca_dev); + setgain(gspca_dev); + setexposure(gspca_dev); +@@ -1014,11 +1021,11 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + +-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) ++static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; + +- sd->autogain = val; ++ sd->agc = val; + + if (gspca_dev->streaming) { + +@@ -1028,16 +1035,16 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) + gspca_dev->ctrl_inac &= ~(1 << AWB_IDX); + else + gspca_dev->ctrl_inac |= (1 << AWB_IDX); +- setautogain(gspca_dev); ++ setagc(gspca_dev); + } + return 0; + } + +-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) ++static int sd_getagc(struct gspca_dev *gspca_dev, __s32 *val) + { + struct sd *sd = (struct sd *) gspca_dev; + +- *val = sd->autogain; ++ *val = sd->agc; + return 0; + } + +From: Max Thrun +Date: Sat, 27 Feb 2010 20:20:21 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Add Auto Exposure +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=9f886b52dd32c4eac5d5be13f74824856ff32baf + +V4L/DVB: gspca - ov534: Add Auto Exposure + +This also makes manual exposure actually work: it never worked before +because AEC was always enabled. 
+ +Signed-off-by: Max Thrun +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 4058e22..2d89650 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -62,6 +62,7 @@ struct sd { + u8 exposure; + u8 agc; + u8 awb; ++ u8 aec; + s8 sharpness; + u8 hflip; + u8 vflip; +@@ -83,6 +84,8 @@ static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setaec(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getaec(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); +@@ -176,6 +179,20 @@ static const struct ctrl sd_ctrls[] = { + }, + { /* 6 */ + { ++ .id = V4L2_CID_EXPOSURE_AUTO, ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .name = "Auto Exposure", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, ++#define AEC_DEF 1 ++ .default_value = AEC_DEF, ++ }, ++ .set = sd_setaec, ++ .get = sd_getaec, ++ }, ++ { /* 7 */ ++ { + .id = V4L2_CID_SHARPNESS, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Sharpness", +@@ -188,7 +205,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setsharpness, + .get = sd_getsharpness, + }, +- { /* 7 */ ++ { /* 8 */ + { + .id = V4L2_CID_HFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -202,7 +219,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_sethflip, + .get = sd_gethflip, + }, +- { /* 8 */ ++ { /* 9 */ + { + .id = V4L2_CID_VFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, +@@ -703,6 +720,20 @@ static void setawb(struct gspca_dev *gspca_dev) + 
sccb_reg_write(gspca_dev, 0x63, 0xaa); /* AWB off */ + } + ++static void setaec(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ if (sd->aec) ++ sccb_reg_write(gspca_dev, 0x13, ++ sccb_reg_read(gspca_dev, 0x13) | 0x01); ++ else { ++ sccb_reg_write(gspca_dev, 0x13, ++ sccb_reg_read(gspca_dev, 0x13) & ~0x01); ++ setexposure(gspca_dev); ++ } ++} ++ + static void setsharpness(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -768,6 +799,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + #if AWB_DEF != 0 + sd->awb = AWB_DEF + #endif ++ sd->aec = AEC_DEF; + #if SHARPNESS_DEF != 0 + sd->sharpness = SHARPNESS_DEF; + #endif +@@ -838,6 +870,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + + setagc(gspca_dev); + setawb(gspca_dev); ++ setaec(gspca_dev); + setgain(gspca_dev); + setexposure(gspca_dev); + setbrightness(gspca_dev); +@@ -1066,6 +1099,24 @@ static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + ++static int sd_setaec(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->aec = val; ++ if (gspca_dev->streaming) ++ setaec(gspca_dev); ++ return 0; ++} ++ ++static int sd_getaec(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->aec; ++ return 0; ++} ++ + static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +From: Antonio Ospite +Date: Mon, 1 Mar 2010 11:53:34 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Fix and document setting manual exposure +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=c8b4264e60b378aa73dc59d434c930bc9345c890 + +V4L/DVB: gspca - ov534: Fix and document setting manual exposure + +Document that even if the state is a u8 value, both MSB and LSB are set +as sd->exposure represents half of the value we are going to set into +registers. 
+ +Skip setting exposure when AEC is enabled. + +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 2d89650..4fda098 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -686,6 +686,15 @@ static void setexposure(struct gspca_dev *gspca_dev) + struct sd *sd = (struct sd *) gspca_dev; + u8 val; + ++ if (sd->aec) ++ return; ++ ++ /* 'val' is one byte and represents half of the exposure value we are ++ * going to set into registers, a two bytes value: ++ * ++ * MSB: ((u16) val << 1) >> 8 == val >> 7 ++ * LSB: ((u16) val << 1) & 0xff == val << 1 ++ */ + val = sd->exposure; + sccb_reg_write(gspca_dev, 0x08, val >> 7); + sccb_reg_write(gspca_dev, 0x10, val << 1); +From: Max Thrun +Date: Sat, 27 Feb 2010 20:20:23 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Fix Auto White Balance control +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=6a7410900b54e9af7f9282917a8bcbc9835ffaa1 + +V4L/DVB: gspca - ov534: Fix Auto White Balance control + +Set only the needed bits for AWB, and enable it by default. 
+ +Signed-off-by: Max Thrun +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 4fda098..3b538d7 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -171,7 +171,7 @@ static const struct ctrl sd_ctrls[] = { + .minimum = 0, + .maximum = 1, + .step = 1, +-#define AWB_DEF 0 ++#define AWB_DEF 1 + .default_value = AWB_DEF, + }, + .set = sd_setawb, +@@ -723,10 +723,17 @@ static void setawb(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + +- if (sd->awb) +- sccb_reg_write(gspca_dev, 0x63, 0xe0); /* AWB on */ +- else +- sccb_reg_write(gspca_dev, 0x63, 0xaa); /* AWB off */ ++ if (sd->awb) { ++ sccb_reg_write(gspca_dev, 0x13, ++ sccb_reg_read(gspca_dev, 0x13) | 0x02); ++ sccb_reg_write(gspca_dev, 0x63, ++ sccb_reg_read(gspca_dev, 0x63) | 0xc0); ++ } else { ++ sccb_reg_write(gspca_dev, 0x13, ++ sccb_reg_read(gspca_dev, 0x13) & ~0x02); ++ sccb_reg_write(gspca_dev, 0x63, ++ sccb_reg_read(gspca_dev, 0x63) & ~0xc0); ++ } + } + + static void setaec(struct gspca_dev *gspca_dev) +@@ -805,9 +812,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + #else + gspca_dev->ctrl_inac |= (1 << AWB_IDX); + #endif +-#if AWB_DEF != 0 +- sd->awb = AWB_DEF +-#endif ++ sd->awb = AWB_DEF; + sd->aec = AEC_DEF; + #if SHARPNESS_DEF != 0 + sd->sharpness = SHARPNESS_DEF; +From: Max Thrun +Date: Sat, 27 Feb 2010 20:20:24 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Fixes for sharpness control +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=2bd647c5a4f8236b035f9db620930731ac77c957 + +V4L/DVB: gspca - ov534: Fixes for sharpness control + +* Adjust comments for sharpness control + * Set default value unconditionally, for readability + +Signed-off-by: Max Thrun +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho 
Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 3b538d7..f2077ea 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -756,8 +756,8 @@ static void setsharpness(struct gspca_dev *gspca_dev) + u8 val; + + val = sd->sharpness; +- sccb_reg_write(gspca_dev, 0x91, val); /* vga noise */ +- sccb_reg_write(gspca_dev, 0x8e, val); /* qvga noise */ ++ sccb_reg_write(gspca_dev, 0x91, val); /* Auto de-noise threshold */ ++ sccb_reg_write(gspca_dev, 0x8e, val); /* De-noise threshold */ + } + + static void sethflip(struct gspca_dev *gspca_dev) +@@ -814,9 +814,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + #endif + sd->awb = AWB_DEF; + sd->aec = AEC_DEF; +-#if SHARPNESS_DEF != 0 + sd->sharpness = SHARPNESS_DEF; +-#endif + #if HFLIP_DEF != 0 + sd->hflip = HFLIP_DEF; + #endif +From: Max Thrun +Date: Sat, 27 Feb 2010 20:20:25 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Fix unsetting hflip and vflip bits +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=9398cf3859a02fcc3a310393857d5cfca61647f3 + +V4L/DVB: gspca - ov534: Fix unsetting hflip and vflip bits + +Also set default values unconditionally, for readability. 
+ +Signed-off-by: Max Thrun +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index f2077ea..8f01201 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -769,7 +769,7 @@ static void sethflip(struct gspca_dev *gspca_dev) + sccb_reg_read(gspca_dev, 0x0c) | 0x40); + else + sccb_reg_write(gspca_dev, 0x0c, +- sccb_reg_read(gspca_dev, 0x0c) & 0xbf); ++ sccb_reg_read(gspca_dev, 0x0c) & ~0x40); + } + + static void setvflip(struct gspca_dev *gspca_dev) +@@ -781,7 +781,7 @@ static void setvflip(struct gspca_dev *gspca_dev) + sccb_reg_read(gspca_dev, 0x0c) | 0x80); + else + sccb_reg_write(gspca_dev, 0x0c, +- sccb_reg_read(gspca_dev, 0x0c) & 0x7f); ++ sccb_reg_read(gspca_dev, 0x0c) & ~0x80); + } + + /* this function is called at probe time */ +@@ -815,12 +815,8 @@ static int sd_config(struct gspca_dev *gspca_dev, + sd->awb = AWB_DEF; + sd->aec = AEC_DEF; + sd->sharpness = SHARPNESS_DEF; +-#if HFLIP_DEF != 0 + sd->hflip = HFLIP_DEF; +-#endif +-#if VFLIP_DEF != 0 + sd->vflip = VFLIP_DEF; +-#endif + + return 0; + } +From: Antonio Ospite +Date: Mon, 1 Mar 2010 11:54:33 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Cosmetics: fix indentation and hex digits +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=521acb59f1fa8d6a2bd37f8262da2b789014ac71 + +V4L/DVB: gspca - ov534: Cosmetics: fix indentation and hex digits + +* Indent with tabs, not with spaces, nor with mixed style. + * Less indentation for controls index comments. + * Use lowercase hex digits. 
+ +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 8f01201..8783844 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -92,147 +92,147 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); + + static const struct ctrl sd_ctrls[] = { +- { /* 0 */ +- { +- .id = V4L2_CID_BRIGHTNESS, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Brightness", +- .minimum = 0, +- .maximum = 255, +- .step = 1, ++ { /* 0 */ ++ { ++ .id = V4L2_CID_BRIGHTNESS, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Brightness", ++ .minimum = 0, ++ .maximum = 255, ++ .step = 1, + #define BRIGHTNESS_DEF 0 +- .default_value = BRIGHTNESS_DEF, ++ .default_value = BRIGHTNESS_DEF, ++ }, ++ .set = sd_setbrightness, ++ .get = sd_getbrightness, + }, +- .set = sd_setbrightness, +- .get = sd_getbrightness, +- }, +- { /* 1 */ +- { +- .id = V4L2_CID_CONTRAST, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Contrast", +- .minimum = 0, +- .maximum = 255, +- .step = 1, ++ { /* 1 */ ++ { ++ .id = V4L2_CID_CONTRAST, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Contrast", ++ .minimum = 0, ++ .maximum = 255, ++ .step = 1, + #define CONTRAST_DEF 32 +- .default_value = CONTRAST_DEF, ++ .default_value = CONTRAST_DEF, ++ }, ++ .set = sd_setcontrast, ++ .get = sd_getcontrast, + }, +- .set = sd_setcontrast, +- .get = sd_getcontrast, +- }, +- { /* 2 */ +- { +- .id = V4L2_CID_GAIN, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Main Gain", +- .minimum = 0, +- .maximum = 63, +- .step = 1, ++ { /* 2 */ ++ { ++ .id = V4L2_CID_GAIN, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Main Gain", ++ .minimum = 0, ++ .maximum = 63, ++ .step = 1, + #define GAIN_DEF 20 +- .default_value = GAIN_DEF, ++ .default_value = GAIN_DEF, ++ }, ++ .set = sd_setgain, ++ .get 
= sd_getgain, + }, +- .set = sd_setgain, +- .get = sd_getgain, +- }, +- { /* 3 */ +- { +- .id = V4L2_CID_EXPOSURE, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Exposure", +- .minimum = 0, +- .maximum = 255, +- .step = 1, ++ { /* 3 */ ++ { ++ .id = V4L2_CID_EXPOSURE, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Exposure", ++ .minimum = 0, ++ .maximum = 255, ++ .step = 1, + #define EXPO_DEF 120 +- .default_value = EXPO_DEF, ++ .default_value = EXPO_DEF, ++ }, ++ .set = sd_setexposure, ++ .get = sd_getexposure, + }, +- .set = sd_setexposure, +- .get = sd_getexposure, +- }, +- { /* 4 */ +- { +- .id = V4L2_CID_AUTOGAIN, +- .type = V4L2_CTRL_TYPE_BOOLEAN, +- .name = "Auto Gain", +- .minimum = 0, +- .maximum = 1, +- .step = 1, ++ { /* 4 */ ++ { ++ .id = V4L2_CID_AUTOGAIN, ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .name = "Auto Gain", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, + #define AGC_DEF 1 +- .default_value = AGC_DEF, ++ .default_value = AGC_DEF, ++ }, ++ .set = sd_setagc, ++ .get = sd_getagc, + }, +- .set = sd_setagc, +- .get = sd_getagc, +- }, + #define AWB_IDX 5 +- { /* 5 */ +- { +- .id = V4L2_CID_AUTO_WHITE_BALANCE, +- .type = V4L2_CTRL_TYPE_BOOLEAN, +- .name = "Auto White Balance", +- .minimum = 0, +- .maximum = 1, +- .step = 1, ++ { /* 5 */ ++ { ++ .id = V4L2_CID_AUTO_WHITE_BALANCE, ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .name = "Auto White Balance", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, + #define AWB_DEF 1 +- .default_value = AWB_DEF, ++ .default_value = AWB_DEF, ++ }, ++ .set = sd_setawb, ++ .get = sd_getawb, + }, +- .set = sd_setawb, +- .get = sd_getawb, +- }, +- { /* 6 */ +- { +- .id = V4L2_CID_EXPOSURE_AUTO, +- .type = V4L2_CTRL_TYPE_BOOLEAN, +- .name = "Auto Exposure", +- .minimum = 0, +- .maximum = 1, +- .step = 1, ++ { /* 6 */ ++ { ++ .id = V4L2_CID_EXPOSURE_AUTO, ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .name = "Auto Exposure", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, + #define AEC_DEF 1 +- .default_value = AEC_DEF, ++ .default_value 
= AEC_DEF, ++ }, ++ .set = sd_setaec, ++ .get = sd_getaec, + }, +- .set = sd_setaec, +- .get = sd_getaec, +- }, +- { /* 7 */ +- { +- .id = V4L2_CID_SHARPNESS, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Sharpness", +- .minimum = 0, +- .maximum = 63, +- .step = 1, ++ { /* 7 */ ++ { ++ .id = V4L2_CID_SHARPNESS, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Sharpness", ++ .minimum = 0, ++ .maximum = 63, ++ .step = 1, + #define SHARPNESS_DEF 0 +- .default_value = SHARPNESS_DEF, ++ .default_value = SHARPNESS_DEF, ++ }, ++ .set = sd_setsharpness, ++ .get = sd_getsharpness, + }, +- .set = sd_setsharpness, +- .get = sd_getsharpness, +- }, +- { /* 8 */ +- { +- .id = V4L2_CID_HFLIP, +- .type = V4L2_CTRL_TYPE_BOOLEAN, +- .name = "HFlip", +- .minimum = 0, +- .maximum = 1, +- .step = 1, ++ { /* 8 */ ++ { ++ .id = V4L2_CID_HFLIP, ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .name = "HFlip", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, + #define HFLIP_DEF 0 +- .default_value = HFLIP_DEF, ++ .default_value = HFLIP_DEF, ++ }, ++ .set = sd_sethflip, ++ .get = sd_gethflip, + }, +- .set = sd_sethflip, +- .get = sd_gethflip, +- }, +- { /* 9 */ +- { +- .id = V4L2_CID_VFLIP, +- .type = V4L2_CTRL_TYPE_BOOLEAN, +- .name = "VFlip", +- .minimum = 0, +- .maximum = 1, +- .step = 1, ++ { /* 9 */ ++ { ++ .id = V4L2_CID_VFLIP, ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .name = "VFlip", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, + #define VFLIP_DEF 0 +- .default_value = VFLIP_DEF, ++ .default_value = VFLIP_DEF, ++ }, ++ .set = sd_setvflip, ++ .get = sd_getvflip, + }, +- .set = sd_setvflip, +- .get = sd_getvflip, +- }, + }; + + static const struct v4l2_pix_format ov772x_mode[] = { +@@ -641,14 +641,14 @@ static void setbrightness(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + +- sccb_reg_write(gspca_dev, 0x9B, sd->brightness); ++ sccb_reg_write(gspca_dev, 0x9b, sd->brightness); + } + + static void setcontrast(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct 
sd *) gspca_dev; + +- sccb_reg_write(gspca_dev, 0x9C, sd->contrast); ++ sccb_reg_write(gspca_dev, 0x9c, sd->contrast); + } + + static void setgain(struct gspca_dev *gspca_dev) +From: Mosalam Ebrahimi +Date: Mon, 8 Mar 2010 16:52:17 +0000 (-0300) +Subject: V4L/DVB: gspca - ov534: Add Powerline Frequency control +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=9bc3ac54da6a52969088caca9f6acdf682fa8ace + +V4L/DVB: gspca - ov534: Add Powerline Frequency control + +Note that setting this options to 50Hz can reduce the framerate, so it is +still disabled (60Hz) by default. + +Signed-off-by: Mosalam Ebrahimi +Signed-off-by: Antonio Ospite +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 8783844..29af17e 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -66,7 +66,7 @@ struct sd { + s8 sharpness; + u8 hflip; + u8 vflip; +- ++ u8 freqfltr; + }; + + /* V4L2 controls supported by the driver */ +@@ -90,6 +90,10 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setfreqfltr(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getfreqfltr(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_querymenu(struct gspca_dev *gspca_dev, ++ struct v4l2_querymenu *menu); + + static const struct ctrl sd_ctrls[] = { + { /* 0 */ +@@ -233,6 +237,20 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setvflip, + .get = sd_getvflip, + }, ++ { /* 10 */ ++ { ++ .id = V4L2_CID_POWER_LINE_FREQUENCY, ++ .type = V4L2_CTRL_TYPE_MENU, ++ .name = "Light Frequency Filter", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, ++#define FREQFLTR_DEF 0 ++ .default_value 
= FREQFLTR_DEF, ++ }, ++ .set = sd_setfreqfltr, ++ .get = sd_getfreqfltr, ++ }, + }; + + static const struct v4l2_pix_format ov772x_mode[] = { +@@ -784,6 +802,17 @@ static void setvflip(struct gspca_dev *gspca_dev) + sccb_reg_read(gspca_dev, 0x0c) & ~0x80); + } + ++static void setfreqfltr(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ if (sd->freqfltr == 0) ++ sccb_reg_write(gspca_dev, 0x2b, 0x00); ++ else ++ sccb_reg_write(gspca_dev, 0x2b, 0x9e); ++} ++ ++ + /* this function is called at probe time */ + static int sd_config(struct gspca_dev *gspca_dev, + const struct usb_device_id *id) +@@ -817,6 +846,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + sd->sharpness = SHARPNESS_DEF; + sd->hflip = HFLIP_DEF; + sd->vflip = VFLIP_DEF; ++ sd->freqfltr = FREQFLTR_DEF; + + return 0; + } +@@ -886,6 +916,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + setsharpness(gspca_dev); + setvflip(gspca_dev); + sethflip(gspca_dev); ++ setfreqfltr(gspca_dev); + + ov534_set_led(gspca_dev, 1); + ov534_reg_write(gspca_dev, 0xe0, 0x00); +@@ -1179,6 +1210,43 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + ++static int sd_setfreqfltr(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->freqfltr = val; ++ if (gspca_dev->streaming) ++ setfreqfltr(gspca_dev); ++ return 0; ++} ++ ++static int sd_getfreqfltr(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->freqfltr; ++ return 0; ++} ++ ++static int sd_querymenu(struct gspca_dev *gspca_dev, ++ struct v4l2_querymenu *menu) ++{ ++ switch (menu->id) { ++ case V4L2_CID_POWER_LINE_FREQUENCY: ++ switch (menu->index) { ++ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ ++ strcpy((char *) menu->name, "Disabled"); ++ return 0; ++ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ ++ strcpy((char *) menu->name, "50 Hz"); ++ return 0; ++ } ++ break; ++ } ++ ++ return 
-EINVAL; ++} ++ + /* get stream parameters (framerate) */ + static int sd_get_streamparm(struct gspca_dev *gspca_dev, + struct v4l2_streamparm *parm) +@@ -1230,6 +1298,7 @@ static const struct sd_desc sd_desc = { + .start = sd_start, + .stopN = sd_stopN, + .pkt_scan = sd_pkt_scan, ++ .querymenu = sd_querymenu, + .get_streamparm = sd_get_streamparm, + .set_streamparm = sd_set_streamparm, + }; +gspca_spca561: Fix LED on rev12a cameras + +From: Hans de Goede + +Fix LED control on rev12a cameras and remove the unneeded +sd_stop0 callback function. + +Priority: normal + +Signed-off-by: Hans de Goede + +--- a/drivers/media/video/gspca/spca561.c Sat Feb 20 15:26:07 2010 +0100 ++++ b/drivers/media/video/gspca/spca561.c Sat Feb 27 11:18:14 2010 +0100 +@@ -280,9 +280,9 @@ + }; + + static const __u16 spca561_161rev12A_data1[][2] = { +- {0x29, 0x8118}, /* white balance - was 21 */ +- {0x08, 0x8114}, /* white balance - was 01 */ +- {0x0e, 0x8112}, /* white balance - was 00 */ ++ {0x29, 0x8118}, /* Control register (various enable bits) */ ++ {0x08, 0x8114}, /* GPIO: Led off */ ++ {0x0e, 0x8112}, /* 0x0e stream off 0x3e stream on */ + {0x00, 0x8102}, /* white balance - new */ + {0x92, 0x8804}, + {0x04, 0x8802}, /* windows uses 08 */ +@@ -294,15 +294,11 @@ + {0x07, 0x8601}, + {0x07, 0x8602}, + {0x04, 0x8501}, +- {0x21, 0x8118}, + + {0x07, 0x8201}, /* windows uses 02 */ + {0x08, 0x8200}, + {0x01, 0x8200}, + +- {0x00, 0x8114}, +- {0x01, 0x8114}, /* windows uses 00 */ +- + {0x90, 0x8604}, + {0x00, 0x8605}, + {0xb0, 0x8603}, +@@ -333,6 +329,9 @@ + {0xf0, 0x8505}, + {0x32, 0x850a}, + /* {0x99, 0x8700}, * - white balance - new (removed) */ ++ /* HDG we used to do this in stop0, making the init state and the state ++ after a start / stop different, so do this here instead. 
*/ ++ {0x29, 0x8118}, + {} + }; + +@@ -676,6 +675,9 @@ + setwhite(gspca_dev); + setgain(gspca_dev); + setexposure(gspca_dev); ++ ++ /* Led ON (bit 3 -> 0 */ ++ reg_w_val(gspca_dev->dev, 0x8114, 0x00); + return 0; + } + static int sd_start_72a(struct gspca_dev *gspca_dev) +@@ -722,26 +724,14 @@ + + if (sd->chip_revision == Rev012A) { + reg_w_val(gspca_dev->dev, 0x8112, 0x0e); ++ /* Led Off (bit 3 -> 1 */ ++ reg_w_val(gspca_dev->dev, 0x8114, 0x08); + } else { + reg_w_val(gspca_dev->dev, 0x8112, 0x20); + /* reg_w_val(gspca_dev->dev, 0x8102, 0x00); ?? */ + } + } + +-/* called on streamoff with alt 0 and on disconnect */ +-static void sd_stop0(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- if (!gspca_dev->present) +- return; +- if (sd->chip_revision == Rev012A) { +- reg_w_val(gspca_dev->dev, 0x8118, 0x29); +- reg_w_val(gspca_dev->dev, 0x8114, 0x08); +- } +-/* reg_w_val(gspca_dev->dev, 0x8114, 0); */ +-} +- + static void do_autogain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -1059,7 +1049,6 @@ + .init = sd_init_12a, + .start = sd_start_12a, + .stopN = sd_stopN, +- .stop0 = sd_stop0, + .pkt_scan = sd_pkt_scan, + }; + static const struct sd_desc sd_desc_72a = { +@@ -1070,7 +1059,6 @@ + .init = sd_init_72a, + .start = sd_start_72a, + .stopN = sd_stopN, +- .stop0 = sd_stop0, + .pkt_scan = sd_pkt_scan, + .dq_callback = do_autogain, + }; + +gspca_spca561: Add support for camera button + +From: Hans de Goede + +gspca_spca561: Add support for camera button + +Priority: normal + +Signed-off-by: Hans de Goede + +--- a/drivers/media/video/gspca/spca561.c Sat Feb 27 11:18:14 2010 +0100 ++++ b/drivers/media/video/gspca/spca561.c Sun Feb 28 13:41:04 2010 +0100 +@@ -22,6 +22,7 @@ + + #define MODULE_NAME "spca561" + ++#include + #include "gspca.h" + + MODULE_AUTHOR("Michel Xhaard "); +@@ -809,6 +810,23 @@ + switch (*data++) { /* sequence number */ + case 0: /* start of frame */ + gspca_frame_add(gspca_dev, 
LAST_PACKET, NULL, 0); ++ ++ /* This should never happen */ ++ if (len < 2) { ++ PDEBUG(D_ERR, "Short SOF packet, ignoring"); ++ gspca_dev->last_packet_type = DISCARD_PACKET; ++ return; ++ } ++ ++#ifdef CONFIG_INPUT ++ if (data[0] & 0x20) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ } ++#endif ++ + if (data[1] & 0x10) { + /* compressed bayer */ + gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); +@@ -1050,6 +1068,9 @@ + .start = sd_start_12a, + .stopN = sd_stopN, + .pkt_scan = sd_pkt_scan, ++#ifdef CONFIG_INPUT ++ .other_input = 1, ++#endif + }; + static const struct sd_desc sd_desc_72a = { + .name = MODULE_NAME, +@@ -1061,6 +1082,9 @@ + .stopN = sd_stopN, + .pkt_scan = sd_pkt_scan, + .dq_callback = do_autogain, ++#ifdef CONFIG_INPUT ++ .other_input = 1, ++#endif + }; + static const struct sd_desc *sd_desc[2] = { + &sd_desc_12a, +From: Jean-François Moine +Date: Wed, 17 Mar 2010 18:25:32 +0000 (-0300) +Subject: V4L/DVB: gspca - sonixj: More static const and better array initialization +X-Git-Url: http://git.linuxtv.org/v4l-dvb.git?a=commitdiff_plain;h=5bcd8657cca97ad5a9c159659f6ba857e35960b8 + +V4L/DVB: gspca - sonixj: More static const and better array initialization + +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c +index 83d5773..eb43f22 100644 +--- a/drivers/media/video/gspca/sonixj.c ++++ b/drivers/media/video/gspca/sonixj.c +@@ -66,17 +66,19 @@ struct sd { + #define BRIDGE_SN9C110 2 + #define BRIDGE_SN9C120 3 + u8 sensor; /* Type of image sensor chip */ +-#define SENSOR_ADCM1700 0 +-#define SENSOR_HV7131R 1 +-#define SENSOR_MI0360 2 +-#define SENSOR_MO4000 3 +-#define SENSOR_MT9V111 4 +-#define SENSOR_OM6802 5 +-#define SENSOR_OV7630 6 +-#define SENSOR_OV7648 7 +-#define SENSOR_OV7660 8 
+-#define SENSOR_PO1030 9 +-#define SENSOR_SP80708 10 ++enum { ++ SENSOR_ADCM1700, ++ SENSOR_HV7131R, ++ SENSOR_MI0360, ++ SENSOR_MO4000, ++ SENSOR_MT9V111, ++ SENSOR_OM6802, ++ SENSOR_OV7630, ++ SENSOR_OV7648, ++ SENSOR_OV7660, ++ SENSOR_PO1030, ++ SENSOR_SP80708, ++} sensors; + u8 i2c_addr; + + u8 *jpeg_hdr; +@@ -280,29 +282,47 @@ static const struct ctrl sd_ctrls[] = { + }; + + /* table of the disabled controls */ +-static __u32 ctrl_dis[] = { +- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX) | +- (1 << AUTOGAIN_IDX), /* SENSOR_ADCM1700 0 */ +- (1 << INFRARED_IDX) | (1 << FREQ_IDX), +- /* SENSOR_HV7131R 1 */ +- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_MI0360 2 */ +- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_MO4000 3 */ +- (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_MT9V111 4 */ +- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_OM6802 5 */ +- (1 << INFRARED_IDX), +- /* SENSOR_OV7630 6 */ +- (1 << INFRARED_IDX), +- /* SENSOR_OV7648 7 */ +- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), +- /* SENSOR_OV7660 8 */ +- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | +- (1 << FREQ_IDX), /* SENSOR_PO1030 9 */ +- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | +- (1 << FREQ_IDX), /* SENSOR_SP80708 10 */ ++static const __u32 ctrl_dis[] = { ++[SENSOR_ADCM1700] = (1 << AUTOGAIN_IDX) | ++ (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), ++ ++[SENSOR_HV7131R] = (1 << INFRARED_IDX) | ++ (1 << FREQ_IDX), ++ ++[SENSOR_MI0360] = (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), ++ ++[SENSOR_MO4000] = (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), ++ ++[SENSOR_MT9V111] = (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), ++ ++[SENSOR_OM6802] = (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), ++ ++[SENSOR_OV7630] = (1 << INFRARED_IDX), ++ ++[SENSOR_OV7648] = (1 << INFRARED_IDX), ++ 
++[SENSOR_OV7660] = (1 << AUTOGAIN_IDX) | ++ (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX), ++ ++[SENSOR_PO1030] = (1 << AUTOGAIN_IDX) | ++ (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), ++ ++[SENSOR_SP80708] = (1 << AUTOGAIN_IDX) | ++ (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), + }; + + static const struct v4l2_pix_format cif_mode[] = { +@@ -455,17 +475,17 @@ static const u8 sn_sp80708[0x1c] = { + + /* sequence specific to the sensors - !! index = SENSOR_xxx */ + static const u8 *sn_tb[] = { +- sn_adcm1700, +- sn_hv7131, +- sn_mi0360, +- sn_mo4000, +- sn_mt9v111, +- sn_om6802, +- sn_ov7630, +- sn_ov7648, +- sn_ov7660, +- sn_po1030, +- sn_sp80708 ++[SENSOR_ADCM1700] = sn_adcm1700, ++[SENSOR_HV7131R] = sn_hv7131, ++[SENSOR_MI0360] = sn_mi0360, ++[SENSOR_MO4000] = sn_mo4000, ++[SENSOR_MT9V111] = sn_mt9v111, ++[SENSOR_OM6802] = sn_om6802, ++[SENSOR_OV7630] = sn_ov7630, ++[SENSOR_OV7648] = sn_ov7648, ++[SENSOR_OV7660] = sn_ov7660, ++[SENSOR_PO1030] = sn_po1030, ++[SENSOR_SP80708] = sn_sp80708 + }; + + /* default gamma table */ +@@ -1068,18 +1088,18 @@ static const u8 sp80708_sensor_param1[][8] = { + {} + }; + +-static const u8 (*sensor_init[11])[8] = { +- adcm1700_sensor_init, /* ADCM1700 0 */ +- hv7131r_sensor_init, /* HV7131R 1 */ +- mi0360_sensor_init, /* MI0360 2 */ +- mo4000_sensor_init, /* MO4000 3 */ +- mt9v111_sensor_init, /* MT9V111 4 */ +- om6802_sensor_init, /* OM6802 5 */ +- ov7630_sensor_init, /* OV7630 6 */ +- ov7648_sensor_init, /* OV7648 7 */ +- ov7660_sensor_init, /* OV7660 8 */ +- po1030_sensor_init, /* PO1030 9 */ +- sp80708_sensor_init, /* SP80708 10 */ ++static const u8 (*sensor_init[])[8] = { ++[SENSOR_ADCM1700] = adcm1700_sensor_init, ++[SENSOR_HV7131R] = hv7131r_sensor_init, ++[SENSOR_MI0360] = mi0360_sensor_init, ++[SENSOR_MO4000] = mo4000_sensor_init, ++[SENSOR_MT9V111] = mt9v111_sensor_init, ++[SENSOR_OM6802] = om6802_sensor_init, ++[SENSOR_OV7630] = ov7630_sensor_init, ++[SENSOR_OV7648] = ov7648_sensor_init, 
++[SENSOR_OV7660] = ov7660_sensor_init, ++[SENSOR_PO1030] = po1030_sensor_init, ++[SENSOR_SP80708] = sp80708_sensor_init, + }; + + /* read bytes to gspca_dev->usb_buf */ +@@ -1702,7 +1722,7 @@ static void setcolors(struct gspca_dev *gspca_dev) + struct sd *sd = (struct sd *) gspca_dev; + int i, v; + u8 reg8a[12]; /* U & V gains */ +- static s16 uv[6] = { /* same as reg84 in signed decimal */ ++ static const s16 uv[6] = { /* same as reg84 in signed decimal */ + -24, -38, 64, /* UR UG UB */ + 62, -51, -9 /* VR VG VB */ + }; +From: Jean-François Moine +Date: Thu, 18 Mar 2010 08:15:30 +0000 (-0300) +Subject: V4L/DVB: gspca - sonixj: Add webcam 0c45:6142 with sensors gc0307 and po2030n +X-Git-Url: http://git.linuxtv.org/v4l-dvb.git?a=commitdiff_plain;h=7ece2ad1a65a3c92c2573ee5c79d45159cbd6183 + +V4L/DVB: gspca - sonixj: Add webcam 0c45:6142 with sensors gc0307 and po2030n + +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt +index 181b9e6..9475e1e 100644 +--- a/Documentation/video4linux/gspca.txt ++++ b/Documentation/video4linux/gspca.txt +@@ -305,6 +305,7 @@ sonixj 0c45:6138 Sn9c120 Mo4000 + sonixj 0c45:613a Microdia Sonix PC Camera + sonixj 0c45:613b Surfer SN-206 + sonixj 0c45:613c Sonix Pccam168 ++sonixj 0c45:6142 Hama PC-Webcam AC-150 + sonixj 0c45:6143 Sonix Pccam168 + sonixj 0c45:6148 Digitus DA-70811/ZSMC USB PC Camera ZS211/Microdia + sonixj 0c45:614a Frontech E-Ccam (JIL-2225) +diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c +index eb43f22..5e727aa 100644 +--- a/drivers/media/video/gspca/sonixj.c ++++ b/drivers/media/video/gspca/sonixj.c +@@ -68,6 +68,7 @@ struct sd { + u8 sensor; /* Type of image sensor chip */ + enum { + SENSOR_ADCM1700, ++ SENSOR_GC0307, + SENSOR_HV7131R, + SENSOR_MI0360, + SENSOR_MO4000, +@@ -77,6 +78,7 @@ enum { + SENSOR_OV7648, + SENSOR_OV7660, + SENSOR_PO1030, ++ SENSOR_PO2030N, + 
SENSOR_SP80708, + } sensors; + u8 i2c_addr; +@@ -288,6 +290,11 @@ static const __u32 ctrl_dis[] = { + (1 << VFLIP_IDX) | + (1 << FREQ_IDX), + ++[SENSOR_GC0307] = (1 << AUTOGAIN_IDX) | ++ (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), ++ + [SENSOR_HV7131R] = (1 << INFRARED_IDX) | + (1 << FREQ_IDX), + +@@ -319,6 +326,11 @@ static const __u32 ctrl_dis[] = { + (1 << VFLIP_IDX) | + (1 << FREQ_IDX), + ++[SENSOR_PO2030N] = (1 << AUTOGAIN_IDX) | ++ (1 << INFRARED_IDX) | ++ (1 << VFLIP_IDX) | ++ (1 << FREQ_IDX), ++ + [SENSOR_SP80708] = (1 << AUTOGAIN_IDX) | + (1 << INFRARED_IDX) | + (1 << VFLIP_IDX) | +@@ -362,7 +374,17 @@ static const u8 sn_adcm1700[0x1c] = { + 0x06, 0x00, 0x00, 0x00 + }; + +-/*Data from sn9c102p+hv7131r */ ++static const u8 sn_gc0307[0x1c] = { ++/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ ++ 0x00, 0x61, 0x62, 0x00, 0x1a, 0x00, 0x00, 0x00, ++/* reg8 reg9 rega regb regc regd rege regf */ ++ 0x80, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ ++ 0x03, 0x00, 0x03, 0x01, 0x08, 0x28, 0x1e, 0x02, ++/* reg18 reg19 reg1a reg1b */ ++ 0x06, 0x00, 0x00, 0x00 ++}; ++ + static const u8 sn_hv7131[0x1c] = { + /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ + 0x00, 0x03, 0x64, 0x00, 0x1a, 0x20, 0x20, 0x20, +@@ -462,6 +484,17 @@ static const u8 sn_po1030[0x1c] = { + 0x07, 0x00, 0x00, 0x00 + }; + ++static const u8 sn_po2030n[0x1c] = { ++/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ ++ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, ++/* reg8 reg9 rega regb regc regd rege regf */ ++ 0x81, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ ++ 0x03, 0x00, 0x00, 0x01, 0x14, 0x28, 0x1e, 0x00, ++/* reg18 reg19 reg1a reg1b */ ++ 0x07, 0x00, 0x00, 0x00 ++}; ++ + static const u8 sn_sp80708[0x1c] = { + /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ + 0x00, 0x63, 0x60, 0x00, 0x1a, 0x20, 0x20, 0x20, +@@ -476,6 +509,7 @@ static const u8 sn_sp80708[0x1c] = { + /* sequence 
specific to the sensors - !! index = SENSOR_xxx */ + static const u8 *sn_tb[] = { + [SENSOR_ADCM1700] = sn_adcm1700, ++[SENSOR_GC0307] = sn_gc0307, + [SENSOR_HV7131R] = sn_hv7131, + [SENSOR_MI0360] = sn_mi0360, + [SENSOR_MO4000] = sn_mo4000, +@@ -485,6 +519,7 @@ static const u8 *sn_tb[] = { + [SENSOR_OV7648] = sn_ov7648, + [SENSOR_OV7660] = sn_ov7660, + [SENSOR_PO1030] = sn_po1030, ++[SENSOR_PO2030N] = sn_po2030n, + [SENSOR_SP80708] = sn_sp80708 + }; + +@@ -503,8 +538,13 @@ static const u8 gamma_spec_1[17] = { + 0x08, 0x3a, 0x52, 0x65, 0x75, 0x83, 0x91, 0x9d, + 0xa9, 0xb4, 0xbe, 0xc8, 0xd2, 0xdb, 0xe4, 0xed, 0xf5 + }; +-/* gamma for sensor SP80708 */ ++/* gamma for sensor GC0307 */ + static const u8 gamma_spec_2[17] = { ++ 0x14, 0x37, 0x50, 0x6a, 0x7c, 0x8d, 0x9d, 0xab, ++ 0xb5, 0xbf, 0xc2, 0xcb, 0xd1, 0xd6, 0xdb, 0xe1, 0xeb ++}; ++/* gamma for sensor SP80708 */ ++static const u8 gamma_spec_3[17] = { + 0x0a, 0x2d, 0x4e, 0x68, 0x7d, 0x8f, 0x9f, 0xab, + 0xb7, 0xc2, 0xcc, 0xd3, 0xd8, 0xde, 0xe2, 0xe5, 0xe6 + }; +@@ -552,6 +592,64 @@ static const u8 adcm1700_sensor_param1[][8] = { + {0xb0, 0x51, 0x32, 0x00, 0xa2, 0x00, 0x00, 0x10}, + {} + }; ++static const u8 gc0307_sensor_init[][8] = { ++ {0xa0, 0x21, 0x43, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x44, 0xa2, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x01, 0x6a, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x02, 0x70, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x11, 0x05, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x08, 0x02, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x09, 0x01, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x0a, 0xe8, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x0b, 0x02, 0x00, 0x00, 0x00, 0x10}, ++ 
{0xa0, 0x21, 0x0c, 0x80, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x0d, 0x22, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x0e, 0x02, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x0f, 0xb2, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x12, 0x70, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/ ++ {0xa0, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x15, 0xb8, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x16, 0x13, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x17, 0x52, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x18, 0x50, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x1e, 0x0d, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x1f, 0x32, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x61, 0x90, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x63, 0x70, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x65, 0x98, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x67, 0x90, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x04, 0x96, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x45, 0x27, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x47, 0x2c, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x43, 0x47, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x44, 0xd8, 0x00, 0x00, 0x00, 0x10}, ++ {} ++}; ++static const u8 gc0307_sensor_param1[][8] = { ++ {0xa0, 0x21, 0x68, 0x13, 0x00, 0x00, 0x00, 0x10}, ++ {0xd0, 0x21, 0x61, 0x80, 0x00, 0x80, 0x00, 0x10}, ++ {0xc0, 0x21, 0x65, 0x80, 0x00, 0x80, 0x00, 0x10}, ++ {0xc0, 0x21, 0x63, 0xa0, 0x00, 0xa6, 0x00, 0x10}, ++/*param3*/ ++ {0xa0, 0x21, 0x01, 0x6e, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x21, 0x02, 0x88, 0x00, 0x00, 0x00, 0x10}, ++ ++ {0xa0, 0x21, 0x68, 0x22, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ++ {0xa0, 0x21, 0x03, 0x07, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ++ {0xa0, 0x21, 0x04, 0x91, 0x00, 0x00, 0x00, 0x10}, ++ {} ++}; ++ + static const u8 hv7131r_sensor_init[][8] = { + {0xc1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 
0x10}, + {0xb1, 0x11, 0x34, 0x17, 0x7f, 0x00, 0x00, 0x10}, +@@ -1003,6 +1101,82 @@ static const u8 po1030_sensor_param1[][8] = { + {} + }; + ++static const u8 po2030n_sensor_init[][8] = { ++ {0xa1, 0x6e, 0x1e, 0x1a, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x1f, 0x99, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */ ++ {0xa1, 0x6e, 0x1e, 0x0a, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x1f, 0x19, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */ ++ {0xa1, 0x6e, 0x20, 0x44, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x05, 0x70, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x08, 0x00, 0xd0, 0x00, 0x08, 0x10}, ++ {0xd1, 0x6e, 0x0c, 0x03, 0x50, 0x01, 0xe8, 0x10}, ++ {0xd1, 0x6e, 0x1d, 0x20, 0x0a, 0x19, 0x44, 0x10}, ++ {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x29, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x35, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x41, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x45, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x49, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x4d, 0x00, 0x00, 0x00, 0xed, 0x10}, ++ {0xd1, 0x6e, 0x51, 0x17, 0x4a, 0x2f, 0xc0, 0x10}, ++ {0xd1, 0x6e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x59, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x61, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x69, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x6d, 0x00, 
0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x71, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x75, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x79, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x81, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x85, 0x00, 0x00, 0x00, 0x08, 0x10}, ++ {0xd1, 0x6e, 0x89, 0x01, 0xe8, 0x00, 0x01, 0x10}, ++ {0xa1, 0x6e, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x01, 0x10}, ++ {0xd1, 0x6e, 0x29, 0xe6, 0x00, 0xbd, 0x03, 0x10}, ++ {0xd1, 0x6e, 0x2d, 0x41, 0x38, 0x68, 0x40, 0x10}, ++ {0xd1, 0x6e, 0x31, 0x2b, 0x00, 0x36, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x35, 0x30, 0x30, 0x08, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x33, 0x06, 0x10}, ++ {0xb1, 0x6e, 0x3d, 0x06, 0x02, 0x00, 0x00, 0x10}, ++ {} ++}; ++static const u8 po2030n_sensor_param1[][8] = { ++ {0xa1, 0x6e, 0x1a, 0x01, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */ ++ {0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10}, ++ {0xd1, 0x6e, 0x16, 0x50, 0x40, 0x49, 0x40, 0x10}, ++/*param2*/ ++ {0xa1, 0x6e, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x05, 0x6f, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10}, ++ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10}, ++ {0xc1, 0x6e, 0x16, 0x52, 0x40, 0x48, 0x00, 0x10}, ++/*after start*/ ++ {0xa1, 0x6e, 0x15, 0x0f, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */ ++ {0xa1, 0x6e, 0x1a, 0x05, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */ ++ {0xa1, 0x6e, 0x1b, 0x53, 0x00, 0x00, 0x00, 0x10}, ++ {} ++}; ++ + static const u8 sp80708_sensor_init[][8] = { + {0xa1, 0x18, 0x06, 
0xf9, 0x00, 0x00, 0x00, 0x10}, + {0xa1, 0x18, 0x09, 0x1f, 0x00, 0x00, 0x00, 0x10}, +@@ -1090,6 +1264,7 @@ static const u8 sp80708_sensor_param1[][8] = { + + static const u8 (*sensor_init[])[8] = { + [SENSOR_ADCM1700] = adcm1700_sensor_init, ++[SENSOR_GC0307] = gc0307_sensor_init, + [SENSOR_HV7131R] = hv7131r_sensor_init, + [SENSOR_MI0360] = mi0360_sensor_init, + [SENSOR_MO4000] = mo4000_sensor_init, +@@ -1099,6 +1274,7 @@ static const u8 (*sensor_init[])[8] = { + [SENSOR_OV7648] = ov7648_sensor_init, + [SENSOR_OV7660] = ov7660_sensor_init, + [SENSOR_PO1030] = po1030_sensor_init, ++[SENSOR_PO2030N] = po2030n_sensor_init, + [SENSOR_SP80708] = sp80708_sensor_init, + }; + +@@ -1168,7 +1344,8 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val) + PDEBUG(D_USBO, "i2c_w2 [%02x] = %02x", reg, val); + switch (sd->sensor) { + case SENSOR_ADCM1700: +- case SENSOR_OM6802: /* i2c command = a0 (100 kHz) */ ++ case SENSOR_OM6802: ++ case SENSOR_GC0307: /* i2c command = a0 (100 kHz) */ + gspca_dev->usb_buf[0] = 0x80 | (2 << 4); + break; + default: /* i2c command = a1 (400 kHz) */ +@@ -1215,7 +1392,8 @@ static void i2c_r(struct gspca_dev *gspca_dev, u8 reg, int len) + + switch (sd->sensor) { + case SENSOR_ADCM1700: +- case SENSOR_OM6802: /* i2c command = 90 (100 kHz) */ ++ case SENSOR_OM6802: ++ case SENSOR_GC0307: /* i2c command = a0 (100 kHz) */ + mode[0] = 0x80 | 0x10; + break; + default: /* i2c command = 91 (400 kHz) */ +@@ -1354,6 +1532,46 @@ static void ov7648_probe(struct gspca_dev *gspca_dev) + gspca_dev->usb_buf[3], gspca_dev->usb_buf[4]); + } + ++/* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */ ++static void po2030n_probe(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ /* check gc0307 */ ++ reg_w1(gspca_dev, 0x17, 0x62); ++ reg_w1(gspca_dev, 0x01, 0x08); ++ reg_w1(gspca_dev, 0x02, 0x22); ++ sd->i2c_addr = 0x21; ++ i2c_r(gspca_dev, 0x00, 1); ++ ++ reg_w1(gspca_dev, 0x01, 0x29); /* reset */ ++ reg_w1(gspca_dev, 0x17, 
0x42); ++ ++ if (gspca_dev->usb_buf[4] == 0x99) { /* gc0307 (?) */ ++ PDEBUG(D_PROBE, "Sensor gc0307"); ++ sd->sensor = SENSOR_GC0307; ++ return; ++ } ++ ++ /* check po2030n */ ++ reg_w1(gspca_dev, 0x17, 0x62); ++ reg_w1(gspca_dev, 0x01, 0x0a); ++ sd->i2c_addr = 0x6e; ++ i2c_r(gspca_dev, 0x00, 2); ++ ++ reg_w1(gspca_dev, 0x01, 0x29); ++ reg_w1(gspca_dev, 0x17, 0x42); ++ ++ if (gspca_dev->usb_buf[3] == 0x20 ++ && gspca_dev->usb_buf[4] == 0x30) ++ PDEBUG(D_PROBE, "Sensor po2030n"); ++/* sd->sensor = SENSOR_PO2030N; */ ++ else ++ PDEBUG(D_PROBE, "Unknown sensor ID %02x%02x", ++ gspca_dev->usb_buf[3], ++ gspca_dev->usb_buf[4]); ++} ++ + static void bridge_init(struct gspca_dev *gspca_dev, + const u8 *sn9c1xx) + { +@@ -1374,8 +1592,10 @@ static void bridge_init(struct gspca_dev *gspca_dev, + reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); + reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); + switch (sd->sensor) { ++ case SENSOR_GC0307: + case SENSOR_OV7660: + case SENSOR_PO1030: ++ case SENSOR_PO2030N: + case SENSOR_SP80708: + reg9a = reg9a_spec; + break; +@@ -1396,6 +1616,14 @@ static void bridge_init(struct gspca_dev *gspca_dev, + reg_w1(gspca_dev, 0x01, 0x42); + reg_w1(gspca_dev, 0x01, 0x42); + break; ++ case SENSOR_GC0307: ++ msleep(50); ++ reg_w1(gspca_dev, 0x01, 0x61); ++ reg_w1(gspca_dev, 0x17, 0x22); ++ reg_w1(gspca_dev, 0x01, 0x60); ++ reg_w1(gspca_dev, 0x01, 0x40); ++ msleep(50); ++ break; + case SENSOR_MT9V111: + reg_w1(gspca_dev, 0x01, 0x61); + reg_w1(gspca_dev, 0x17, 0x61); +@@ -1438,6 +1666,12 @@ static void bridge_init(struct gspca_dev *gspca_dev, + reg_w1(gspca_dev, 0x01, 0x60); + reg_w1(gspca_dev, 0x01, 0x40); + break; ++ case SENSOR_PO2030N: ++ reg_w1(gspca_dev, 0x01, 0x63); ++ reg_w1(gspca_dev, 0x17, 0x20); ++ reg_w1(gspca_dev, 0x01, 0x62); ++ reg_w1(gspca_dev, 0x01, 0x42); ++ break; + case SENSOR_OV7660: + /* fall thru */ + case SENSOR_SP80708: +@@ -1545,6 +1779,9 @@ static int sd_init(struct gspca_dev *gspca_dev) + case SENSOR_OV7648: + ov7648_probe(gspca_dev); + 
break; ++ case SENSOR_PO2030N: ++ po2030n_probe(gspca_dev); ++ break; + } + regGpio[1] = 0x70; + reg_w(gspca_dev, 0x01, regGpio, 2); +@@ -1763,9 +2000,12 @@ static void setgamma(struct gspca_dev *gspca_dev) + case SENSOR_MT9V111: + gamma_base = gamma_spec_1; + break; +- case SENSOR_SP80708: ++ case SENSOR_GC0307: + gamma_base = gamma_spec_2; + break; ++ case SENSOR_SP80708: ++ gamma_base = gamma_spec_3; ++ break; + default: + gamma_base = gamma_def; + break; +@@ -1956,9 +2196,15 @@ static int sd_start(struct gspca_dev *gspca_dev) + static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; + static const u8 CA_adcm1700[] = + { 0x14, 0xec, 0x0a, 0xf6 }; ++ static const u8 CA_po2030n[] = ++ { 0x1e, 0xe2, 0x14, 0xec }; + static const u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ ++ static const u8 CE_gc0307[] = ++ { 0x32, 0xce, 0x2d, 0xd3 }; + static const u8 CE_ov76xx[] = + { 0x32, 0xdd, 0x32, 0xdd }; ++ static const u8 CE_po2030n[] = ++ { 0x14, 0xe7, 0x1e, 0xdd }; + + /* create the JPEG header */ + sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); +@@ -2015,6 +2261,9 @@ static int sd_start(struct gspca_dev *gspca_dev) + } + reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); + switch (sd->sensor) { ++ case SENSOR_GC0307: ++ reg17 = 0xa2; ++ break; + case SENSOR_MT9V111: + reg17 = 0xe0; + break; +@@ -2029,6 +2278,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg17 = 0xa0; + break; + case SENSOR_PO1030: ++ case SENSOR_PO2030N: + reg17 = 0xa0; + break; + default: +@@ -2053,12 +2303,16 @@ static int sd_start(struct gspca_dev *gspca_dev) + case SENSOR_SP80708: + reg_w1(gspca_dev, 0x9a, 0x05); + break; ++ case SENSOR_GC0307: + case SENSOR_MT9V111: + reg_w1(gspca_dev, 0x9a, 0x07); + break; + case SENSOR_OV7648: + reg_w1(gspca_dev, 0x9a, 0x0a); + break; ++ case SENSOR_PO2030N: ++ reg_w1(gspca_dev, 0x9a, 0x06); ++ break; + default: + reg_w1(gspca_dev, 0x9a, 0x08); + break; +@@ -2083,6 +2337,11 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg1 = 0x46; + reg17 = 0xe2; + 
break; ++ case SENSOR_GC0307: ++ init = gc0307_sensor_param1; ++ reg17 = 0xa2; ++ reg1 = 0x44; ++ break; + case SENSOR_MO4000: + if (mode) { + /* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */ +@@ -2132,6 +2391,11 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg17 = 0xa2; + reg1 = 0x44; + break; ++ case SENSOR_PO2030N: ++ init = po2030n_sensor_param1; ++ reg1 = 0x46; ++ reg17 = 0xa2; ++ break; + default: + /* case SENSOR_SP80708: */ + init = sp80708_sensor_param1; +@@ -2151,10 +2415,18 @@ static int sd_start(struct gspca_dev *gspca_dev) + } + + reg_w(gspca_dev, 0xc0, C0, 6); +- if (sd->sensor == SENSOR_ADCM1700) ++ switch (sd->sensor) { ++ case SENSOR_ADCM1700: ++ case SENSOR_GC0307: + reg_w(gspca_dev, 0xca, CA_adcm1700, 4); +- else ++ break; ++ case SENSOR_PO2030N: ++ reg_w(gspca_dev, 0xca, CA_po2030n, 4); ++ break; ++ default: + reg_w(gspca_dev, 0xca, CA, 4); ++ break; ++ } + switch (sd->sensor) { + case SENSOR_ADCM1700: + case SENSOR_OV7630: +@@ -2162,6 +2434,12 @@ static int sd_start(struct gspca_dev *gspca_dev) + case SENSOR_OV7660: + reg_w(gspca_dev, 0xce, CE_ov76xx, 4); + break; ++ case SENSOR_GC0307: ++ reg_w(gspca_dev, 0xce, CE_gc0307, 4); ++ break; ++ case SENSOR_PO2030N: ++ reg_w(gspca_dev, 0xce, CE_po2030n, 4); ++ break; + default: + reg_w(gspca_dev, 0xce, CE, 4); + /* ?? 
{0x1e, 0xdd, 0x2d, 0xe7} */ +@@ -2199,6 +2477,9 @@ static void sd_stopN(struct gspca_dev *gspca_dev) + + data = 0x0b; + switch (sd->sensor) { ++ case SENSOR_GC0307: ++ data = 0x29; ++ break; + case SENSOR_HV7131R: + i2c_w8(gspca_dev, stophv7131); + data = 0x2b; +@@ -2675,7 +2956,7 @@ static const __devinitdata struct usb_device_id device_table[] = { + #endif + {USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)}, + {USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)}, +-/* {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, *sn9c120b*/ ++ {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/ + {USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/ + {USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/ + {USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)}, /*sn9c120b*/ +From: Jean-François Moine +Date: Fri, 2 Apr 2010 10:12:42 +0000 (+0200) +Subject: gspca - vc032x: Change the ov7670 format to YUYV. +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=510d87cf78323250749ee9b95631f28f1d942020 + +gspca - vc032x: Change the ov7670 format to YUYV. 
+ +Signed-off-by: Jean-François Moine +--- + +diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c +index 7fde145..732c3df 100644 +--- a/drivers/media/video/gspca/vc032x.c ++++ b/drivers/media/video/gspca/vc032x.c +@@ -1971,268 +1971,489 @@ static const u8 ov7660_NoFliker[][4] = { + {} + }; + +-static const u8 ov7670_initVGA_JPG[][4] = { ++static const u8 ov7670_InitVGA[][4] = { + {0xb3, 0x01, 0x05, 0xcc}, +- {0x00, 0x00, 0x30, 0xdd}, {0xb0, 0x03, 0x19, 0xcc}, ++ {0x00, 0x00, 0x30, 0xdd}, ++ {0xb0, 0x03, 0x19, 0xcc}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0xb0, 0x04, 0x02, 0xcc}, + {0x00, 0x00, 0x10, 0xdd}, +- {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd}, +- {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb3, 0x00, 0x66, 0xcc}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb0, 0x16, 0x01, 0xcc}, + {0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */ + {0xb3, 0x34, 0x01, 0xcc}, +- {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc}, +- {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc}, +- {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc}, +- {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc}, +- {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc}, +- {0xb3, 0x04, 0x05, 0xcc}, {0xb3, 0x20, 0x00, 0xcc}, +- {0xb3, 0x21, 0x00, 0xcc}, {0xb3, 0x22, 0x01, 0xcc}, +- {0xb3, 0x23, 0xe0, 0xcc}, {0xbc, 0x00, 0x41, 0xcc}, +- {0xbc, 0x01, 0x01, 0xcc}, {0x00, 0x12, 0x80, 0xaa}, +- {0x00, 0x00, 0x20, 0xdd}, {0x00, 0x12, 0x00, 0xaa}, +- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x6b, 0x0a, 0xaa}, +- {0x00, 0x3a, 0x04, 0xaa}, {0x00, 0x40, 0xc0, 0xaa}, +- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x7a, 0x29, 0xaa}, +- {0x00, 0x7b, 0x0e, 0xaa}, {0x00, 0x7c, 0x1a, 0xaa}, +- {0x00, 0x7d, 0x31, 0xaa}, {0x00, 0x7e, 0x53, 0xaa}, +- {0x00, 0x7f, 0x60, 0xaa}, {0x00, 0x80, 0x6b, 0xaa}, +- {0x00, 0x81, 0x73, 0xaa}, {0x00, 0x82, 0x7b, 0xaa}, +- {0x00, 0x83, 0x82, 0xaa}, {0x00, 0x84, 0x89, 0xaa}, +- {0x00, 0x85, 0x96, 0xaa}, {0x00, 0x86, 0xa1, 0xaa}, +- {0x00, 0x87, 0xb7, 0xaa}, {0x00, 0x88, 
0xcc, 0xaa}, +- {0x00, 0x89, 0xe1, 0xaa}, {0x00, 0x13, 0xe0, 0xaa}, +- {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x10, 0x00, 0xaa}, +- {0x00, 0x0d, 0x40, 0xaa}, {0x00, 0x14, 0x28, 0xaa}, +- {0x00, 0xa5, 0x05, 0xaa}, {0x00, 0xab, 0x07, 0xaa}, +- {0x00, 0x24, 0x95, 0xaa}, {0x00, 0x25, 0x33, 0xaa}, +- {0x00, 0x26, 0xe3, 0xaa}, {0x00, 0x9f, 0x88, 0xaa}, +- {0x00, 0xa0, 0x78, 0xaa}, {0x00, 0x55, 0x90, 0xaa}, +- {0x00, 0xa1, 0x03, 0xaa}, {0x00, 0xa6, 0xe0, 0xaa}, +- {0x00, 0xa7, 0xd8, 0xaa}, {0x00, 0xa8, 0xf0, 0xaa}, +- {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa}, +- {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa}, +- {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa}, ++ {0xb3, 0x05, 0x01, 0xcc}, ++ {0xb3, 0x06, 0x01, 0xcc}, ++ {0xb3, 0x08, 0x01, 0xcc}, ++ {0xb3, 0x09, 0x0c, 0xcc}, ++ {0xb3, 0x02, 0x02, 0xcc}, ++ {0xb3, 0x03, 0x1f, 0xcc}, ++ {0xb3, 0x14, 0x00, 0xcc}, ++ {0xb3, 0x15, 0x00, 0xcc}, ++ {0xb3, 0x16, 0x02, 0xcc}, ++ {0xb3, 0x17, 0x7f, 0xcc}, ++ {0xb3, 0x04, 0x05, 0xcc}, ++ {0xb3, 0x20, 0x00, 0xcc}, ++ {0xb3, 0x21, 0x00, 0xcc}, ++ {0xb3, 0x22, 0x01, 0xcc}, ++ {0xb3, 0x23, 0xe0, 0xcc}, ++ {0xbc, 0x00, 0x41, 0xcc}, ++ {0xbc, 0x01, 0x01, 0xcc}, ++ {0x00, 0x12, 0x80, 0xaa}, ++ {0x00, 0x00, 0x20, 0xdd}, ++ {0x00, 0x12, 0x00, 0xaa}, ++ {0x00, 0x11, 0x40, 0xaa}, ++ {0x00, 0x6b, 0x0a, 0xaa}, ++ {0x00, 0x3a, 0x04, 0xaa}, ++ {0x00, 0x40, 0xc0, 0xaa}, ++ {0x00, 0x8c, 0x00, 0xaa}, ++ {0x00, 0x7a, 0x29, 0xaa}, ++ {0x00, 0x7b, 0x0e, 0xaa}, ++ {0x00, 0x7c, 0x1a, 0xaa}, ++ {0x00, 0x7d, 0x31, 0xaa}, ++ {0x00, 0x7e, 0x53, 0xaa}, ++ {0x00, 0x7f, 0x60, 0xaa}, ++ {0x00, 0x80, 0x6b, 0xaa}, ++ {0x00, 0x81, 0x73, 0xaa}, ++ {0x00, 0x82, 0x7b, 0xaa}, ++ {0x00, 0x83, 0x82, 0xaa}, ++ {0x00, 0x84, 0x89, 0xaa}, ++ {0x00, 0x85, 0x96, 0xaa}, ++ {0x00, 0x86, 0xa1, 0xaa}, ++ {0x00, 0x87, 0xb7, 0xaa}, ++ {0x00, 0x88, 0xcc, 0xaa}, ++ {0x00, 0x89, 0xe1, 0xaa}, ++ {0x00, 0x13, 0xe0, 0xaa}, ++ {0x00, 0x00, 0x00, 0xaa}, ++ {0x00, 0x10, 0x00, 0xaa}, ++ {0x00, 0x0d, 0x40, 0xaa}, ++ {0x00, 0x14, 0x28, 
0xaa}, ++ {0x00, 0xa5, 0x05, 0xaa}, ++ {0x00, 0xab, 0x07, 0xaa}, ++ {0x00, 0x24, 0x95, 0xaa}, ++ {0x00, 0x25, 0x33, 0xaa}, ++ {0x00, 0x26, 0xe3, 0xaa}, ++ {0x00, 0x9f, 0x88, 0xaa}, ++ {0x00, 0xa0, 0x78, 0xaa}, ++ {0x00, 0x55, 0x90, 0xaa}, ++ {0x00, 0xa1, 0x03, 0xaa}, ++ {0x00, 0xa6, 0xe0, 0xaa}, ++ {0x00, 0xa7, 0xd8, 0xaa}, ++ {0x00, 0xa8, 0xf0, 0xaa}, ++ {0x00, 0xa9, 0x90, 0xaa}, ++ {0x00, 0xaa, 0x14, 0xaa}, ++ {0x00, 0x13, 0xe5, 0xaa}, ++ {0x00, 0x0e, 0x61, 0xaa}, ++ {0x00, 0x0f, 0x4b, 0xaa}, ++ {0x00, 0x16, 0x02, 0xaa}, + {0x00, 0x1e, 0x07, 0xaa}, /* MVFP */ + {0x00, 0x21, 0x02, 0xaa}, +- {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa}, +- {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa}, +- {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa}, +- {0x00, 0x39, 0x2a, 0xaa}, {0x00, 0x3c, 0x78, 0xaa}, +- {0x00, 0x4d, 0x40, 0xaa}, {0x00, 0x4e, 0x20, 0xaa}, +- {0x00, 0x74, 0x19, 0xaa}, {0x00, 0x8d, 0x4f, 0xaa}, +- {0x00, 0x8e, 0x00, 0xaa}, {0x00, 0x8f, 0x00, 0xaa}, +- {0x00, 0x90, 0x00, 0xaa}, {0x00, 0x91, 0x00, 0xaa}, +- {0x00, 0x96, 0x00, 0xaa}, {0x00, 0x9a, 0x80, 0xaa}, +- {0x00, 0xb0, 0x84, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa}, +- {0x00, 0xb2, 0x0e, 0xaa}, {0x00, 0xb3, 0x82, 0xaa}, +- {0x00, 0xb8, 0x0a, 0xaa}, {0x00, 0x43, 0x14, 0xaa}, +- {0x00, 0x44, 0xf0, 0xaa}, {0x00, 0x45, 0x45, 0xaa}, +- {0x00, 0x46, 0x63, 0xaa}, {0x00, 0x47, 0x2d, 0xaa}, +- {0x00, 0x48, 0x46, 0xaa}, {0x00, 0x59, 0x88, 0xaa}, +- {0x00, 0x5a, 0xa0, 0xaa}, {0x00, 0x5b, 0xc6, 0xaa}, +- {0x00, 0x5c, 0x7d, 0xaa}, {0x00, 0x5d, 0x5f, 0xaa}, +- {0x00, 0x5e, 0x19, 0xaa}, {0x00, 0x6c, 0x0a, 0xaa}, +- {0x00, 0x6d, 0x55, 0xaa}, {0x00, 0x6e, 0x11, 0xaa}, +- {0x00, 0x6f, 0x9e, 0xaa}, {0x00, 0x69, 0x00, 0xaa}, +- {0x00, 0x6a, 0x40, 0xaa}, {0x00, 0x01, 0x40, 0xaa}, +- {0x00, 0x02, 0x40, 0xaa}, {0x00, 0x13, 0xe7, 0xaa}, +- {0x00, 0x5f, 0xf0, 0xaa}, {0x00, 0x60, 0xf0, 0xaa}, +- {0x00, 0x61, 0xf0, 0xaa}, {0x00, 0x27, 0xa0, 0xaa}, +- {0x00, 0x28, 0x80, 0xaa}, {0x00, 0x2c, 0x90, 0xaa}, +- {0x00, 0x4f, 0x66, 0xaa}, 
{0x00, 0x50, 0x66, 0xaa}, +- {0x00, 0x51, 0x00, 0xaa}, {0x00, 0x52, 0x22, 0xaa}, +- {0x00, 0x53, 0x5e, 0xaa}, {0x00, 0x54, 0x80, 0xaa}, +- {0x00, 0x58, 0x9e, 0xaa}, {0x00, 0x41, 0x08, 0xaa}, +- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x85, 0xaa}, +- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa}, +- {0x00, 0x77, 0x0a, 0xaa}, {0x00, 0x3d, 0x88, 0xaa}, +- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa}, +- {0x00, 0x41, 0x38, 0xaa}, {0x00, 0x62, 0x30, 0xaa}, +- {0x00, 0x63, 0x30, 0xaa}, {0x00, 0x64, 0x08, 0xaa}, +- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x0b, 0xaa}, +- {0x00, 0x65, 0x00, 0xaa}, {0x00, 0x66, 0x05, 0xaa}, +- {0x00, 0x56, 0x50, 0xaa}, {0x00, 0x34, 0x11, 0xaa}, +- {0x00, 0xa4, 0x88, 0xaa}, {0x00, 0x96, 0x00, 0xaa}, +- {0x00, 0x97, 0x30, 0xaa}, {0x00, 0x98, 0x20, 0xaa}, +- {0x00, 0x99, 0x30, 0xaa}, {0x00, 0x9a, 0x84, 0xaa}, +- {0x00, 0x9b, 0x29, 0xaa}, {0x00, 0x9c, 0x03, 0xaa}, +- {0x00, 0x78, 0x04, 0xaa}, {0x00, 0x79, 0x01, 0xaa}, +- {0x00, 0xc8, 0xf0, 0xaa}, {0x00, 0x79, 0x0f, 0xaa}, +- {0x00, 0xc8, 0x00, 0xaa}, {0x00, 0x79, 0x10, 0xaa}, +- {0x00, 0xc8, 0x7e, 0xaa}, {0x00, 0x79, 0x0a, 0xaa}, +- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x0b, 0xaa}, +- {0x00, 0xc8, 0x01, 0xaa}, {0x00, 0x79, 0x0c, 0xaa}, +- {0x00, 0xc8, 0x0f, 0xaa}, {0x00, 0x79, 0x0d, 0xaa}, +- {0x00, 0xc8, 0x20, 0xaa}, {0x00, 0x79, 0x09, 0xaa}, +- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x02, 0xaa}, +- {0x00, 0xc8, 0xc0, 0xaa}, {0x00, 0x79, 0x03, 0xaa}, +- {0x00, 0xc8, 0x40, 0xaa}, {0x00, 0x79, 0x05, 0xaa}, +- {0x00, 0xc8, 0x30, 0xaa}, {0x00, 0x79, 0x26, 0xaa}, +- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x3a, 0x04, 0xaa}, +- {0x00, 0x12, 0x00, 0xaa}, {0x00, 0x40, 0xc0, 0xaa}, +- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x17, 0x14, 0xaa}, +- {0x00, 0x18, 0x02, 0xaa}, {0x00, 0x32, 0x92, 0xaa}, +- {0x00, 0x19, 0x02, 0xaa}, {0x00, 0x1a, 0x7a, 0xaa}, +- {0x00, 0x03, 0x0a, 0xaa}, {0x00, 0x0c, 0x00, 0xaa}, +- {0x00, 0x3e, 0x00, 0xaa}, {0x00, 0x70, 0x3a, 0xaa}, +- {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 
0xaa}, +- {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa}, +- {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa}, ++ {0x00, 0x22, 0x91, 0xaa}, ++ {0x00, 0x29, 0x07, 0xaa}, ++ {0x00, 0x33, 0x0b, 0xaa}, ++ {0x00, 0x35, 0x0b, 0xaa}, ++ {0x00, 0x37, 0x1d, 0xaa}, ++ {0x00, 0x38, 0x71, 0xaa}, ++ {0x00, 0x39, 0x2a, 0xaa}, ++ {0x00, 0x3c, 0x78, 0xaa}, ++ {0x00, 0x4d, 0x40, 0xaa}, ++ {0x00, 0x4e, 0x20, 0xaa}, ++ {0x00, 0x74, 0x19, 0xaa}, ++ {0x00, 0x8d, 0x4f, 0xaa}, ++ {0x00, 0x8e, 0x00, 0xaa}, ++ {0x00, 0x8f, 0x00, 0xaa}, ++ {0x00, 0x90, 0x00, 0xaa}, ++ {0x00, 0x91, 0x00, 0xaa}, ++ {0x00, 0x96, 0x00, 0xaa}, ++ {0x00, 0x9a, 0x80, 0xaa}, ++ {0x00, 0xb0, 0x84, 0xaa}, ++ {0x00, 0xb1, 0x0c, 0xaa}, ++ {0x00, 0xb2, 0x0e, 0xaa}, ++ {0x00, 0xb3, 0x82, 0xaa}, ++ {0x00, 0xb8, 0x0a, 0xaa}, ++ {0x00, 0x43, 0x14, 0xaa}, ++ {0x00, 0x44, 0xf0, 0xaa}, ++ {0x00, 0x45, 0x45, 0xaa}, ++ {0x00, 0x46, 0x63, 0xaa}, ++ {0x00, 0x47, 0x2d, 0xaa}, ++ {0x00, 0x48, 0x46, 0xaa}, ++ {0x00, 0x59, 0x88, 0xaa}, ++ {0x00, 0x5a, 0xa0, 0xaa}, ++ {0x00, 0x5b, 0xc6, 0xaa}, ++ {0x00, 0x5c, 0x7d, 0xaa}, ++ {0x00, 0x5d, 0x5f, 0xaa}, ++ {0x00, 0x5e, 0x19, 0xaa}, ++ {0x00, 0x6c, 0x0a, 0xaa}, ++ {0x00, 0x6d, 0x55, 0xaa}, ++ {0x00, 0x6e, 0x11, 0xaa}, ++ {0x00, 0x6f, 0x9e, 0xaa}, ++ {0x00, 0x69, 0x00, 0xaa}, ++ {0x00, 0x6a, 0x40, 0xaa}, ++ {0x00, 0x01, 0x40, 0xaa}, ++ {0x00, 0x02, 0x40, 0xaa}, ++ {0x00, 0x13, 0xe7, 0xaa}, ++ {0x00, 0x5f, 0xf0, 0xaa}, ++ {0x00, 0x60, 0xf0, 0xaa}, ++ {0x00, 0x61, 0xf0, 0xaa}, ++ {0x00, 0x27, 0xa0, 0xaa}, ++ {0x00, 0x28, 0x80, 0xaa}, ++ {0x00, 0x2c, 0x90, 0xaa}, ++ {0x00, 0x4f, 0x66, 0xaa}, ++ {0x00, 0x50, 0x66, 0xaa}, ++ {0x00, 0x51, 0x00, 0xaa}, ++ {0x00, 0x52, 0x22, 0xaa}, ++ {0x00, 0x53, 0x5e, 0xaa}, ++ {0x00, 0x54, 0x80, 0xaa}, ++ {0x00, 0x58, 0x9e, 0xaa}, ++ {0x00, 0x41, 0x08, 0xaa}, ++ {0x00, 0x3f, 0x00, 0xaa}, ++ {0x00, 0x75, 0x85, 0xaa}, ++ {0x00, 0x76, 0xe1, 0xaa}, ++ {0x00, 0x4c, 0x00, 0xaa}, ++ {0x00, 0x77, 0x0a, 0xaa}, ++ {0x00, 0x3d, 0x88, 0xaa}, ++ {0x00, 0x4b, 0x09, 
0xaa}, ++ {0x00, 0xc9, 0x60, 0xaa}, ++ {0x00, 0x41, 0x38, 0xaa}, ++ {0x00, 0x62, 0x30, 0xaa}, ++ {0x00, 0x63, 0x30, 0xaa}, ++ {0x00, 0x64, 0x08, 0xaa}, ++ {0x00, 0x94, 0x07, 0xaa}, ++ {0x00, 0x95, 0x0b, 0xaa}, ++ {0x00, 0x65, 0x00, 0xaa}, ++ {0x00, 0x66, 0x05, 0xaa}, ++ {0x00, 0x56, 0x50, 0xaa}, ++ {0x00, 0x34, 0x11, 0xaa}, ++ {0x00, 0xa4, 0x88, 0xaa}, ++ {0x00, 0x96, 0x00, 0xaa}, ++ {0x00, 0x97, 0x30, 0xaa}, ++ {0x00, 0x98, 0x20, 0xaa}, ++ {0x00, 0x99, 0x30, 0xaa}, ++ {0x00, 0x9a, 0x84, 0xaa}, ++ {0x00, 0x9b, 0x29, 0xaa}, ++ {0x00, 0x9c, 0x03, 0xaa}, ++ {0x00, 0x78, 0x04, 0xaa}, ++ {0x00, 0x79, 0x01, 0xaa}, ++ {0x00, 0xc8, 0xf0, 0xaa}, ++ {0x00, 0x79, 0x0f, 0xaa}, ++ {0x00, 0xc8, 0x00, 0xaa}, ++ {0x00, 0x79, 0x10, 0xaa}, ++ {0x00, 0xc8, 0x7e, 0xaa}, ++ {0x00, 0x79, 0x0a, 0xaa}, ++ {0x00, 0xc8, 0x80, 0xaa}, ++ {0x00, 0x79, 0x0b, 0xaa}, ++ {0x00, 0xc8, 0x01, 0xaa}, ++ {0x00, 0x79, 0x0c, 0xaa}, ++ {0x00, 0xc8, 0x0f, 0xaa}, ++ {0x00, 0x79, 0x0d, 0xaa}, ++ {0x00, 0xc8, 0x20, 0xaa}, ++ {0x00, 0x79, 0x09, 0xaa}, ++ {0x00, 0xc8, 0x80, 0xaa}, ++ {0x00, 0x79, 0x02, 0xaa}, ++ {0x00, 0xc8, 0xc0, 0xaa}, ++ {0x00, 0x79, 0x03, 0xaa}, ++ {0x00, 0xc8, 0x40, 0xaa}, ++ {0x00, 0x79, 0x05, 0xaa}, ++ {0x00, 0xc8, 0x30, 0xaa}, ++ {0x00, 0x79, 0x26, 0xaa}, ++ {0x00, 0x11, 0x40, 0xaa}, ++ {0x00, 0x3a, 0x04, 0xaa}, ++ {0x00, 0x12, 0x00, 0xaa}, ++ {0x00, 0x40, 0xc0, 0xaa}, ++ {0x00, 0x8c, 0x00, 0xaa}, ++ {0x00, 0x17, 0x14, 0xaa}, ++ {0x00, 0x18, 0x02, 0xaa}, ++ {0x00, 0x32, 0x92, 0xaa}, ++ {0x00, 0x19, 0x02, 0xaa}, ++ {0x00, 0x1a, 0x7a, 0xaa}, ++ {0x00, 0x03, 0x0a, 0xaa}, ++ {0x00, 0x0c, 0x00, 0xaa}, ++ {0x00, 0x3e, 0x00, 0xaa}, ++ {0x00, 0x70, 0x3a, 0xaa}, ++ {0x00, 0x71, 0x35, 0xaa}, ++ {0x00, 0x72, 0x11, 0xaa}, ++ {0x00, 0x73, 0xf0, 0xaa}, ++ {0x00, 0xa2, 0x02, 0xaa}, ++ {0x00, 0xb1, 0x00, 0xaa}, ++ {0x00, 0xb1, 0x0c, 0xaa}, + {0x00, 0x1e, 0x37, 0xaa}, /* MVFP */ + {0x00, 0xaa, 0x14, 0xaa}, +- {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa}, +- {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 
0x00, 0xaa}, +- {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa}, +- {0x00, 0x9e, 0x7f, 0xaa}, {0x00, 0x64, 0x08, 0xaa}, +- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x06, 0xaa}, +- {0x00, 0x66, 0x05, 0xaa}, {0x00, 0x41, 0x08, 0xaa}, +- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x07, 0xaa}, +- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa}, +- {0x00, 0x77, 0x00, 0xaa}, {0x00, 0x3d, 0xc2, 0xaa}, +- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa}, +- {0x00, 0x41, 0x38, 0xaa}, {0xb6, 0x00, 0x00, 0xcc}, +- {0xb6, 0x03, 0x02, 0xcc}, {0xb6, 0x02, 0x80, 0xcc}, +- {0xb6, 0x05, 0x01, 0xcc}, {0xb6, 0x04, 0xe0, 0xcc}, +- {0xb6, 0x12, 0xf8, 0xcc}, {0xb6, 0x13, 0x13, 0xcc}, +- {0xb6, 0x18, 0x02, 0xcc}, {0xb6, 0x17, 0x58, 0xcc}, +- {0xb6, 0x16, 0x00, 0xcc}, {0xb6, 0x22, 0x12, 0xcc}, +- {0xb6, 0x23, 0x0b, 0xcc}, {0xbf, 0xc0, 0x39, 0xcc}, +- {0xbf, 0xc1, 0x04, 0xcc}, {0xbf, 0xcc, 0x00, 0xcc}, +- {0xb3, 0x5c, 0x01, 0xcc}, {0xb3, 0x01, 0x45, 0xcc}, ++ {0x00, 0x24, 0x80, 0xaa}, ++ {0x00, 0x25, 0x74, 0xaa}, ++ {0x00, 0x26, 0xd3, 0xaa}, ++ {0x00, 0x0d, 0x00, 0xaa}, ++ {0x00, 0x14, 0x18, 0xaa}, ++ {0x00, 0x9d, 0x99, 0xaa}, ++ {0x00, 0x9e, 0x7f, 0xaa}, ++ {0x00, 0x64, 0x08, 0xaa}, ++ {0x00, 0x94, 0x07, 0xaa}, ++ {0x00, 0x95, 0x06, 0xaa}, ++ {0x00, 0x66, 0x05, 0xaa}, ++ {0x00, 0x41, 0x08, 0xaa}, ++ {0x00, 0x3f, 0x00, 0xaa}, ++ {0x00, 0x75, 0x07, 0xaa}, ++ {0x00, 0x76, 0xe1, 0xaa}, ++ {0x00, 0x4c, 0x00, 0xaa}, ++ {0x00, 0x77, 0x00, 0xaa}, ++ {0x00, 0x3d, 0xc2, 0xaa}, ++ {0x00, 0x4b, 0x09, 0xaa}, ++ {0x00, 0xc9, 0x60, 0xaa}, ++ {0x00, 0x41, 0x38, 0xaa}, ++ {0xbf, 0xc0, 0x26, 0xcc}, ++ {0xbf, 0xc1, 0x02, 0xcc}, ++ {0xbf, 0xcc, 0x04, 0xcc}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {0xb3, 0x01, 0x45, 0xcc}, + {0x00, 0x77, 0x05, 0xaa}, + {}, + }; + +-static const u8 ov7670_initQVGA_JPG[][4] = { +- {0xb3, 0x01, 0x05, 0xcc}, {0x00, 0x00, 0x30, 0xdd}, +- {0xb0, 0x03, 0x19, 0xcc}, {0x00, 0x00, 0x10, 0xdd}, +- {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd}, +- {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 
0x67, 0xcc}, +- {0xb3, 0x35, 0xa1, 0xcc}, {0xb3, 0x34, 0x01, 0xcc}, +- {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc}, +- {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc}, +- {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc}, +- {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc}, +- {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc}, +- {0xb3, 0x04, 0x05, 0xcc}, {0xb3, 0x20, 0x00, 0xcc}, +- {0xb3, 0x21, 0x00, 0xcc}, {0xb3, 0x22, 0x01, 0xcc}, +- {0xb3, 0x23, 0xe0, 0xcc}, {0xbc, 0x00, 0xd1, 0xcc}, +- {0xbc, 0x01, 0x01, 0xcc}, {0x00, 0x12, 0x80, 0xaa}, +- {0x00, 0x00, 0x20, 0xdd}, {0x00, 0x12, 0x00, 0xaa}, +- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x6b, 0x0a, 0xaa}, +- {0x00, 0x3a, 0x04, 0xaa}, {0x00, 0x40, 0xc0, 0xaa}, +- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x7a, 0x29, 0xaa}, +- {0x00, 0x7b, 0x0e, 0xaa}, {0x00, 0x7c, 0x1a, 0xaa}, +- {0x00, 0x7d, 0x31, 0xaa}, {0x00, 0x7e, 0x53, 0xaa}, +- {0x00, 0x7f, 0x60, 0xaa}, {0x00, 0x80, 0x6b, 0xaa}, +- {0x00, 0x81, 0x73, 0xaa}, {0x00, 0x82, 0x7b, 0xaa}, +- {0x00, 0x83, 0x82, 0xaa}, {0x00, 0x84, 0x89, 0xaa}, +- {0x00, 0x85, 0x96, 0xaa}, {0x00, 0x86, 0xa1, 0xaa}, +- {0x00, 0x87, 0xb7, 0xaa}, {0x00, 0x88, 0xcc, 0xaa}, +- {0x00, 0x89, 0xe1, 0xaa}, {0x00, 0x13, 0xe0, 0xaa}, +- {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x10, 0x00, 0xaa}, +- {0x00, 0x0d, 0x40, 0xaa}, {0x00, 0x14, 0x28, 0xaa}, +- {0x00, 0xa5, 0x05, 0xaa}, {0x00, 0xab, 0x07, 0xaa}, +- {0x00, 0x24, 0x95, 0xaa}, {0x00, 0x25, 0x33, 0xaa}, +- {0x00, 0x26, 0xe3, 0xaa}, {0x00, 0x9f, 0x88, 0xaa}, +- {0x00, 0xa0, 0x78, 0xaa}, {0x00, 0x55, 0x90, 0xaa}, +- {0x00, 0xa1, 0x03, 0xaa}, {0x00, 0xa6, 0xe0, 0xaa}, +- {0x00, 0xa7, 0xd8, 0xaa}, {0x00, 0xa8, 0xf0, 0xaa}, +- {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa}, +- {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa}, +- {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa}, ++static const u8 ov7670_InitQVGA[][4] = { ++ {0xb3, 0x01, 0x05, 0xcc}, ++ {0x00, 0x00, 0x30, 0xdd}, ++ {0xb0, 0x03, 0x19, 0xcc}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0xb0, 
0x04, 0x02, 0xcc}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0xb3, 0x00, 0x66, 0xcc}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb0, 0x16, 0x01, 0xcc}, ++ {0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */ ++ {0xb3, 0x34, 0x01, 0xcc}, ++ {0xb3, 0x05, 0x01, 0xcc}, ++ {0xb3, 0x06, 0x01, 0xcc}, ++ {0xb3, 0x08, 0x01, 0xcc}, ++ {0xb3, 0x09, 0x0c, 0xcc}, ++ {0xb3, 0x02, 0x02, 0xcc}, ++ {0xb3, 0x03, 0x1f, 0xcc}, ++ {0xb3, 0x14, 0x00, 0xcc}, ++ {0xb3, 0x15, 0x00, 0xcc}, ++ {0xb3, 0x16, 0x02, 0xcc}, ++ {0xb3, 0x17, 0x7f, 0xcc}, ++ {0xb3, 0x04, 0x05, 0xcc}, ++ {0xb3, 0x20, 0x00, 0xcc}, ++ {0xb3, 0x21, 0x00, 0xcc}, ++ {0xb3, 0x22, 0x01, 0xcc}, ++ {0xb3, 0x23, 0xe0, 0xcc}, ++ {0xbc, 0x00, 0xd1, 0xcc}, ++ {0xbc, 0x01, 0x01, 0xcc}, ++ {0x00, 0x12, 0x80, 0xaa}, ++ {0x00, 0x00, 0x20, 0xdd}, ++ {0x00, 0x12, 0x00, 0xaa}, ++ {0x00, 0x11, 0x40, 0xaa}, ++ {0x00, 0x6b, 0x0a, 0xaa}, ++ {0x00, 0x3a, 0x04, 0xaa}, ++ {0x00, 0x40, 0xc0, 0xaa}, ++ {0x00, 0x8c, 0x00, 0xaa}, ++ {0x00, 0x7a, 0x29, 0xaa}, ++ {0x00, 0x7b, 0x0e, 0xaa}, ++ {0x00, 0x7c, 0x1a, 0xaa}, ++ {0x00, 0x7d, 0x31, 0xaa}, ++ {0x00, 0x7e, 0x53, 0xaa}, ++ {0x00, 0x7f, 0x60, 0xaa}, ++ {0x00, 0x80, 0x6b, 0xaa}, ++ {0x00, 0x81, 0x73, 0xaa}, ++ {0x00, 0x82, 0x7b, 0xaa}, ++ {0x00, 0x83, 0x82, 0xaa}, ++ {0x00, 0x84, 0x89, 0xaa}, ++ {0x00, 0x85, 0x96, 0xaa}, ++ {0x00, 0x86, 0xa1, 0xaa}, ++ {0x00, 0x87, 0xb7, 0xaa}, ++ {0x00, 0x88, 0xcc, 0xaa}, ++ {0x00, 0x89, 0xe1, 0xaa}, ++ {0x00, 0x13, 0xe0, 0xaa}, ++ {0x00, 0x00, 0x00, 0xaa}, ++ {0x00, 0x10, 0x00, 0xaa}, ++ {0x00, 0x0d, 0x40, 0xaa}, ++ {0x00, 0x14, 0x28, 0xaa}, ++ {0x00, 0xa5, 0x05, 0xaa}, ++ {0x00, 0xab, 0x07, 0xaa}, ++ {0x00, 0x24, 0x95, 0xaa}, ++ {0x00, 0x25, 0x33, 0xaa}, ++ {0x00, 0x26, 0xe3, 0xaa}, ++ {0x00, 0x9f, 0x88, 0xaa}, ++ {0x00, 0xa0, 0x78, 0xaa}, ++ {0x00, 0x55, 0x90, 0xaa}, ++ {0x00, 0xa1, 0x03, 0xaa}, ++ {0x00, 0xa6, 0xe0, 0xaa}, ++ {0x00, 0xa7, 0xd8, 0xaa}, ++ {0x00, 0xa8, 0xf0, 0xaa}, ++ {0x00, 0xa9, 0x90, 0xaa}, ++ {0x00, 0xaa, 0x14, 0xaa}, ++ {0x00, 0x13, 0xe5, 0xaa}, ++ {0x00, 0x0e, 
0x61, 0xaa}, ++ {0x00, 0x0f, 0x4b, 0xaa}, ++ {0x00, 0x16, 0x02, 0xaa}, + {0x00, 0x1e, 0x07, 0xaa}, /* MVFP */ + {0x00, 0x21, 0x02, 0xaa}, +- {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa}, +- {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa}, +- {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa}, +- {0x00, 0x39, 0x2a, 0xaa}, {0x00, 0x3c, 0x78, 0xaa}, +- {0x00, 0x4d, 0x40, 0xaa}, {0x00, 0x4e, 0x20, 0xaa}, +- {0x00, 0x74, 0x19, 0xaa}, {0x00, 0x8d, 0x4f, 0xaa}, +- {0x00, 0x8e, 0x00, 0xaa}, {0x00, 0x8f, 0x00, 0xaa}, +- {0x00, 0x90, 0x00, 0xaa}, {0x00, 0x91, 0x00, 0xaa}, +- {0x00, 0x96, 0x00, 0xaa}, {0x00, 0x9a, 0x80, 0xaa}, +- {0x00, 0xb0, 0x84, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa}, +- {0x00, 0xb2, 0x0e, 0xaa}, {0x00, 0xb3, 0x82, 0xaa}, +- {0x00, 0xb8, 0x0a, 0xaa}, {0x00, 0x43, 0x14, 0xaa}, +- {0x00, 0x44, 0xf0, 0xaa}, {0x00, 0x45, 0x45, 0xaa}, +- {0x00, 0x46, 0x63, 0xaa}, {0x00, 0x47, 0x2d, 0xaa}, +- {0x00, 0x48, 0x46, 0xaa}, {0x00, 0x59, 0x88, 0xaa}, +- {0x00, 0x5a, 0xa0, 0xaa}, {0x00, 0x5b, 0xc6, 0xaa}, +- {0x00, 0x5c, 0x7d, 0xaa}, {0x00, 0x5d, 0x5f, 0xaa}, +- {0x00, 0x5e, 0x19, 0xaa}, {0x00, 0x6c, 0x0a, 0xaa}, +- {0x00, 0x6d, 0x55, 0xaa}, {0x00, 0x6e, 0x11, 0xaa}, +- {0x00, 0x6f, 0x9e, 0xaa}, {0x00, 0x69, 0x00, 0xaa}, +- {0x00, 0x6a, 0x40, 0xaa}, {0x00, 0x01, 0x40, 0xaa}, +- {0x00, 0x02, 0x40, 0xaa}, {0x00, 0x13, 0xe7, 0xaa}, +- {0x00, 0x5f, 0xf0, 0xaa}, {0x00, 0x60, 0xf0, 0xaa}, +- {0x00, 0x61, 0xf0, 0xaa}, {0x00, 0x27, 0xa0, 0xaa}, +- {0x00, 0x28, 0x80, 0xaa}, {0x00, 0x2c, 0x90, 0xaa}, +- {0x00, 0x4f, 0x66, 0xaa}, {0x00, 0x50, 0x66, 0xaa}, +- {0x00, 0x51, 0x00, 0xaa}, {0x00, 0x52, 0x22, 0xaa}, +- {0x00, 0x53, 0x5e, 0xaa}, {0x00, 0x54, 0x80, 0xaa}, +- {0x00, 0x58, 0x9e, 0xaa}, {0x00, 0x41, 0x08, 0xaa}, +- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x85, 0xaa}, +- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa}, +- {0x00, 0x77, 0x0a, 0xaa}, {0x00, 0x3d, 0x88, 0xaa}, +- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa}, +- {0x00, 0x41, 0x38, 0xaa}, {0x00, 0x62, 
0x30, 0xaa}, +- {0x00, 0x63, 0x30, 0xaa}, {0x00, 0x64, 0x08, 0xaa}, +- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x0b, 0xaa}, +- {0x00, 0x65, 0x00, 0xaa}, {0x00, 0x66, 0x05, 0xaa}, +- {0x00, 0x56, 0x50, 0xaa}, {0x00, 0x34, 0x11, 0xaa}, +- {0x00, 0xa4, 0x88, 0xaa}, {0x00, 0x96, 0x00, 0xaa}, +- {0x00, 0x97, 0x30, 0xaa}, {0x00, 0x98, 0x20, 0xaa}, +- {0x00, 0x99, 0x30, 0xaa}, {0x00, 0x9a, 0x84, 0xaa}, +- {0x00, 0x9b, 0x29, 0xaa}, {0x00, 0x9c, 0x03, 0xaa}, +- {0x00, 0x78, 0x04, 0xaa}, {0x00, 0x79, 0x01, 0xaa}, +- {0x00, 0xc8, 0xf0, 0xaa}, {0x00, 0x79, 0x0f, 0xaa}, +- {0x00, 0xc8, 0x00, 0xaa}, {0x00, 0x79, 0x10, 0xaa}, +- {0x00, 0xc8, 0x7e, 0xaa}, {0x00, 0x79, 0x0a, 0xaa}, +- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x0b, 0xaa}, +- {0x00, 0xc8, 0x01, 0xaa}, {0x00, 0x79, 0x0c, 0xaa}, +- {0x00, 0xc8, 0x0f, 0xaa}, {0x00, 0x79, 0x0d, 0xaa}, +- {0x00, 0xc8, 0x20, 0xaa}, {0x00, 0x79, 0x09, 0xaa}, +- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x02, 0xaa}, +- {0x00, 0xc8, 0xc0, 0xaa}, {0x00, 0x79, 0x03, 0xaa}, +- {0x00, 0xc8, 0x40, 0xaa}, {0x00, 0x79, 0x05, 0xaa}, +- {0x00, 0xc8, 0x30, 0xaa}, {0x00, 0x79, 0x26, 0xaa}, +- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x3a, 0x04, 0xaa}, +- {0x00, 0x12, 0x00, 0xaa}, {0x00, 0x40, 0xc0, 0xaa}, +- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x17, 0x14, 0xaa}, +- {0x00, 0x18, 0x02, 0xaa}, {0x00, 0x32, 0x92, 0xaa}, +- {0x00, 0x19, 0x02, 0xaa}, {0x00, 0x1a, 0x7a, 0xaa}, +- {0x00, 0x03, 0x0a, 0xaa}, {0x00, 0x0c, 0x00, 0xaa}, +- {0x00, 0x3e, 0x00, 0xaa}, {0x00, 0x70, 0x3a, 0xaa}, +- {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa}, +- {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa}, +- {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa}, ++ {0x00, 0x22, 0x91, 0xaa}, ++ {0x00, 0x29, 0x07, 0xaa}, ++ {0x00, 0x33, 0x0b, 0xaa}, ++ {0x00, 0x35, 0x0b, 0xaa}, ++ {0x00, 0x37, 0x1d, 0xaa}, ++ {0x00, 0x38, 0x71, 0xaa}, ++ {0x00, 0x39, 0x2a, 0xaa}, ++ {0x00, 0x3c, 0x78, 0xaa}, ++ {0x00, 0x4d, 0x40, 0xaa}, ++ {0x00, 0x4e, 0x20, 0xaa}, ++ {0x00, 0x74, 0x19, 0xaa}, ++ {0x00, 0x8d, 
0x4f, 0xaa}, ++ {0x00, 0x8e, 0x00, 0xaa}, ++ {0x00, 0x8f, 0x00, 0xaa}, ++ {0x00, 0x90, 0x00, 0xaa}, ++ {0x00, 0x91, 0x00, 0xaa}, ++ {0x00, 0x96, 0x00, 0xaa}, ++ {0x00, 0x9a, 0x80, 0xaa}, ++ {0x00, 0xb0, 0x84, 0xaa}, ++ {0x00, 0xb1, 0x0c, 0xaa}, ++ {0x00, 0xb2, 0x0e, 0xaa}, ++ {0x00, 0xb3, 0x82, 0xaa}, ++ {0x00, 0xb8, 0x0a, 0xaa}, ++ {0x00, 0x43, 0x14, 0xaa}, ++ {0x00, 0x44, 0xf0, 0xaa}, ++ {0x00, 0x45, 0x45, 0xaa}, ++ {0x00, 0x46, 0x63, 0xaa}, ++ {0x00, 0x47, 0x2d, 0xaa}, ++ {0x00, 0x48, 0x46, 0xaa}, ++ {0x00, 0x59, 0x88, 0xaa}, ++ {0x00, 0x5a, 0xa0, 0xaa}, ++ {0x00, 0x5b, 0xc6, 0xaa}, ++ {0x00, 0x5c, 0x7d, 0xaa}, ++ {0x00, 0x5d, 0x5f, 0xaa}, ++ {0x00, 0x5e, 0x19, 0xaa}, ++ {0x00, 0x6c, 0x0a, 0xaa}, ++ {0x00, 0x6d, 0x55, 0xaa}, ++ {0x00, 0x6e, 0x11, 0xaa}, ++ {0x00, 0x6f, 0x9e, 0xaa}, ++ {0x00, 0x69, 0x00, 0xaa}, ++ {0x00, 0x6a, 0x40, 0xaa}, ++ {0x00, 0x01, 0x40, 0xaa}, ++ {0x00, 0x02, 0x40, 0xaa}, ++ {0x00, 0x13, 0xe7, 0xaa}, ++ {0x00, 0x5f, 0xf0, 0xaa}, ++ {0x00, 0x60, 0xf0, 0xaa}, ++ {0x00, 0x61, 0xf0, 0xaa}, ++ {0x00, 0x27, 0xa0, 0xaa}, ++ {0x00, 0x28, 0x80, 0xaa}, ++ {0x00, 0x2c, 0x90, 0xaa}, ++ {0x00, 0x4f, 0x66, 0xaa}, ++ {0x00, 0x50, 0x66, 0xaa}, ++ {0x00, 0x51, 0x00, 0xaa}, ++ {0x00, 0x52, 0x22, 0xaa}, ++ {0x00, 0x53, 0x5e, 0xaa}, ++ {0x00, 0x54, 0x80, 0xaa}, ++ {0x00, 0x58, 0x9e, 0xaa}, ++ {0x00, 0x41, 0x08, 0xaa}, ++ {0x00, 0x3f, 0x00, 0xaa}, ++ {0x00, 0x75, 0x85, 0xaa}, ++ {0x00, 0x76, 0xe1, 0xaa}, ++ {0x00, 0x4c, 0x00, 0xaa}, ++ {0x00, 0x77, 0x0a, 0xaa}, ++ {0x00, 0x3d, 0x88, 0xaa}, ++ {0x00, 0x4b, 0x09, 0xaa}, ++ {0x00, 0xc9, 0x60, 0xaa}, ++ {0x00, 0x41, 0x38, 0xaa}, ++ {0x00, 0x62, 0x30, 0xaa}, ++ {0x00, 0x63, 0x30, 0xaa}, ++ {0x00, 0x64, 0x08, 0xaa}, ++ {0x00, 0x94, 0x07, 0xaa}, ++ {0x00, 0x95, 0x0b, 0xaa}, ++ {0x00, 0x65, 0x00, 0xaa}, ++ {0x00, 0x66, 0x05, 0xaa}, ++ {0x00, 0x56, 0x50, 0xaa}, ++ {0x00, 0x34, 0x11, 0xaa}, ++ {0x00, 0xa4, 0x88, 0xaa}, ++ {0x00, 0x96, 0x00, 0xaa}, ++ {0x00, 0x97, 0x30, 0xaa}, ++ {0x00, 0x98, 0x20, 0xaa}, ++ {0x00, 
0x99, 0x30, 0xaa}, ++ {0x00, 0x9a, 0x84, 0xaa}, ++ {0x00, 0x9b, 0x29, 0xaa}, ++ {0x00, 0x9c, 0x03, 0xaa}, ++ {0x00, 0x78, 0x04, 0xaa}, ++ {0x00, 0x79, 0x01, 0xaa}, ++ {0x00, 0xc8, 0xf0, 0xaa}, ++ {0x00, 0x79, 0x0f, 0xaa}, ++ {0x00, 0xc8, 0x00, 0xaa}, ++ {0x00, 0x79, 0x10, 0xaa}, ++ {0x00, 0xc8, 0x7e, 0xaa}, ++ {0x00, 0x79, 0x0a, 0xaa}, ++ {0x00, 0xc8, 0x80, 0xaa}, ++ {0x00, 0x79, 0x0b, 0xaa}, ++ {0x00, 0xc8, 0x01, 0xaa}, ++ {0x00, 0x79, 0x0c, 0xaa}, ++ {0x00, 0xc8, 0x0f, 0xaa}, ++ {0x00, 0x79, 0x0d, 0xaa}, ++ {0x00, 0xc8, 0x20, 0xaa}, ++ {0x00, 0x79, 0x09, 0xaa}, ++ {0x00, 0xc8, 0x80, 0xaa}, ++ {0x00, 0x79, 0x02, 0xaa}, ++ {0x00, 0xc8, 0xc0, 0xaa}, ++ {0x00, 0x79, 0x03, 0xaa}, ++ {0x00, 0xc8, 0x40, 0xaa}, ++ {0x00, 0x79, 0x05, 0xaa}, ++ {0x00, 0xc8, 0x30, 0xaa}, ++ {0x00, 0x79, 0x26, 0xaa}, ++ {0x00, 0x11, 0x40, 0xaa}, ++ {0x00, 0x3a, 0x04, 0xaa}, ++ {0x00, 0x12, 0x00, 0xaa}, ++ {0x00, 0x40, 0xc0, 0xaa}, ++ {0x00, 0x8c, 0x00, 0xaa}, ++ {0x00, 0x17, 0x14, 0xaa}, ++ {0x00, 0x18, 0x02, 0xaa}, ++ {0x00, 0x32, 0x92, 0xaa}, ++ {0x00, 0x19, 0x02, 0xaa}, ++ {0x00, 0x1a, 0x7a, 0xaa}, ++ {0x00, 0x03, 0x0a, 0xaa}, ++ {0x00, 0x0c, 0x00, 0xaa}, ++ {0x00, 0x3e, 0x00, 0xaa}, ++ {0x00, 0x70, 0x3a, 0xaa}, ++ {0x00, 0x71, 0x35, 0xaa}, ++ {0x00, 0x72, 0x11, 0xaa}, ++ {0x00, 0x73, 0xf0, 0xaa}, ++ {0x00, 0xa2, 0x02, 0xaa}, ++ {0x00, 0xb1, 0x00, 0xaa}, ++ {0x00, 0xb1, 0x0c, 0xaa}, + {0x00, 0x1e, 0x37, 0xaa}, /* MVFP */ + {0x00, 0xaa, 0x14, 0xaa}, +- {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa}, +- {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa}, +- {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa}, +- {0x00, 0x9e, 0x7f, 0xaa}, {0x00, 0x64, 0x08, 0xaa}, +- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x06, 0xaa}, +- {0x00, 0x66, 0x05, 0xaa}, {0x00, 0x41, 0x08, 0xaa}, +- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x07, 0xaa}, +- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa}, +- {0x00, 0x77, 0x00, 0xaa}, {0x00, 0x3d, 0xc2, 0xaa}, +- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa}, 
+- {0x00, 0x41, 0x38, 0xaa}, {0xb6, 0x00, 0x00, 0xcc}, +- {0xb6, 0x03, 0x01, 0xcc}, {0xb6, 0x02, 0x40, 0xcc}, +- {0xb6, 0x05, 0x00, 0xcc}, {0xb6, 0x04, 0xf0, 0xcc}, +- {0xb6, 0x12, 0xf8, 0xcc}, {0xb6, 0x13, 0x21, 0xcc}, +- {0xb6, 0x18, 0x00, 0xcc}, {0xb6, 0x17, 0x96, 0xcc}, +- {0xb6, 0x16, 0x00, 0xcc}, {0xb6, 0x22, 0x12, 0xcc}, +- {0xb6, 0x23, 0x0b, 0xcc}, {0xbf, 0xc0, 0x39, 0xcc}, +- {0xbf, 0xc1, 0x04, 0xcc}, {0xbf, 0xcc, 0x00, 0xcc}, +- {0xbc, 0x02, 0x18, 0xcc}, {0xbc, 0x03, 0x50, 0xcc}, +- {0xbc, 0x04, 0x18, 0xcc}, {0xbc, 0x05, 0x00, 0xcc}, +- {0xbc, 0x06, 0x00, 0xcc}, {0xbc, 0x08, 0x30, 0xcc}, +- {0xbc, 0x09, 0x40, 0xcc}, {0xbc, 0x0a, 0x10, 0xcc}, +- {0xbc, 0x0b, 0x00, 0xcc}, {0xbc, 0x0c, 0x00, 0xcc}, +- {0xb3, 0x5c, 0x01, 0xcc}, {0xb3, 0x01, 0x45, 0xcc}, +- {0x00, 0x77, 0x05, 0xaa }, ++ {0x00, 0x24, 0x80, 0xaa}, ++ {0x00, 0x25, 0x74, 0xaa}, ++ {0x00, 0x26, 0xd3, 0xaa}, ++ {0x00, 0x0d, 0x00, 0xaa}, ++ {0x00, 0x14, 0x18, 0xaa}, ++ {0x00, 0x9d, 0x99, 0xaa}, ++ {0x00, 0x9e, 0x7f, 0xaa}, ++ {0x00, 0x64, 0x08, 0xaa}, ++ {0x00, 0x94, 0x07, 0xaa}, ++ {0x00, 0x95, 0x06, 0xaa}, ++ {0x00, 0x66, 0x05, 0xaa}, ++ {0x00, 0x41, 0x08, 0xaa}, ++ {0x00, 0x3f, 0x00, 0xaa}, ++ {0x00, 0x75, 0x07, 0xaa}, ++ {0x00, 0x76, 0xe1, 0xaa}, ++ {0x00, 0x4c, 0x00, 0xaa}, ++ {0x00, 0x77, 0x00, 0xaa}, ++ {0x00, 0x3d, 0xc2, 0xaa}, ++ {0x00, 0x4b, 0x09, 0xaa}, ++ {0x00, 0xc9, 0x60, 0xaa}, ++ {0x00, 0x41, 0x38, 0xaa}, ++ {0xbc, 0x02, 0x18, 0xcc}, ++ {0xbc, 0x03, 0x50, 0xcc}, ++ {0xbc, 0x04, 0x18, 0xcc}, ++ {0xbc, 0x05, 0x00, 0xcc}, ++ {0xbc, 0x06, 0x00, 0xcc}, ++ {0xbc, 0x08, 0x30, 0xcc}, ++ {0xbc, 0x09, 0x40, 0xcc}, ++ {0xbc, 0x0a, 0x10, 0xcc}, ++ {0xbc, 0x0b, 0x00, 0xcc}, ++ {0xbc, 0x0c, 0x00, 0xcc}, ++ {0xbf, 0xc0, 0x26, 0xcc}, ++ {0xbf, 0xc1, 0x02, 0xcc}, ++ {0xbf, 0xcc, 0x04, 0xcc}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {0xb3, 0x01, 0x45, 0xcc}, ++ {0x00, 0x77, 0x05, 0xaa}, + {}, + }; + +@@ -3117,6 +3338,10 @@ static int sd_config(struct gspca_dev *gspca_dev, + cam->cam_mode = bi_mode; + 
cam->nmodes = ARRAY_SIZE(bi_mode); + break; ++ case SENSOR_OV7670: ++ cam->cam_mode = bi_mode; ++ cam->nmodes = ARRAY_SIZE(bi_mode) - 1; ++ break; + default: + cam->cam_mode = vc0323_mode; + cam->nmodes = ARRAY_SIZE(vc0323_mode) - 1; +@@ -3329,14 +3554,6 @@ static int sd_start(struct gspca_dev *gspca_dev) + else + init = ov7660_initVGA_data; /* 640x480 */ + break; +- case SENSOR_OV7670: +- /*GammaT = ov7660_gamma; */ +- /*MatrixT = ov7660_matrix; */ +- if (mode) +- init = ov7670_initQVGA_JPG; /* 320x240 */ +- else +- init = ov7670_initVGA_JPG; /* 640x480 */ +- break; + case SENSOR_MI0360: + GammaT = mi1320_gamma; + MatrixT = mi0360_matrix; +@@ -3373,6 +3590,9 @@ static int sd_start(struct gspca_dev *gspca_dev) + MatrixT = mi1320_matrix; + init = mi1320_soc_init[mode]; + break; ++ case SENSOR_OV7670: ++ init = mode == 1 ? ov7670_InitVGA : ov7670_InitQVGA; ++ break; + case SENSOR_PO3130NC: + GammaT = po3130_gamma; + MatrixT = po3130_matrix; +@@ -3426,7 +3646,13 @@ static int sd_start(struct gspca_dev *gspca_dev) + sethvflip(gspca_dev); + setlightfreq(gspca_dev); + } +- if (sd->sensor == SENSOR_POxxxx) { ++ switch (sd->sensor) { ++ case SENSOR_OV7670: ++ reg_w(gspca_dev->dev, 0x87, 0xffff, 0xffff); ++ reg_w(gspca_dev->dev, 0x88, 0xff00, 0xf0f1); ++ reg_w(gspca_dev->dev, 0xa0, 0x0000, 0xbfff); ++ break; ++ case SENSOR_POxxxx: + setcolors(gspca_dev); + setbrightness(gspca_dev); + setcontrast(gspca_dev); +@@ -3435,6 +3661,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + msleep(80); + reg_w(gspca_dev->dev, 0x89, 0xffff, 0xfdff); + usb_exchange(gspca_dev, poxxxx_init_end_2); ++ break; + } + return 0; + } diff --git a/linux-2.6-v4l-dvb-update.patch b/linux-2.6-v4l-dvb-update.patch new file mode 100644 index 000000000..e69de29bb diff --git a/linux-2.6-v4l-dvb-uvcvideo-update.patch b/linux-2.6-v4l-dvb-uvcvideo-update.patch new file mode 100644 index 000000000..672926376 --- /dev/null +++ b/linux-2.6-v4l-dvb-uvcvideo-update.patch @@ -0,0 +1,646 @@ +From: Laurent 
Pinchart +Date: Sat, 13 Mar 2010 21:12:15 +0000 (-0300) +Subject: V4L/DVB: uvcvideo: Add support for Packard Bell EasyNote MX52 integrated webcam +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=513640877c336551f7a4428eaff7a4eb0f42cb9e + +V4L/DVB: uvcvideo: Add support for Packard Bell EasyNote MX52 integrated webcam + +The camera requires the STREAM_NO_FID quirk. Add a corresponding entry +in the device IDs list. + +Signed-off-by: Laurent Pinchart +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c +index a192c51..43892bf 100644 +--- a/drivers/media/video/uvc/uvc_driver.c ++++ b/drivers/media/video/uvc/uvc_driver.c +@@ -2104,6 +2104,15 @@ static struct usb_device_id uvc_ids[] = { + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_STREAM_NO_FID }, ++ /* Syntek (Packard Bell EasyNote MX52 */ ++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE ++ | USB_DEVICE_ID_MATCH_INT_INFO, ++ .idVendor = 0x174f, ++ .idProduct = 0x8a12, ++ .bInterfaceClass = USB_CLASS_VIDEO, ++ .bInterfaceSubClass = 1, ++ .bInterfaceProtocol = 0, ++ .driver_info = UVC_QUIRK_STREAM_NO_FID }, + /* Syntek (Asus F9SG) */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, +From: Laurent Pinchart +Date: Thu, 4 Mar 2010 10:51:25 +0000 (-0300) +Subject: V4L/DVB: uvcvideo: Add support for unbranded Arkmicro 18ec:3290 webcams +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=89c21df1c6c4b984bc791dbaf50691b19852acf3 + +V4L/DVB: uvcvideo: Add support for unbranded Arkmicro 18ec:3290 webcams + +The camera requires the PROBE_DEF quirk. Add a corresponding entry in +the device IDs list. 
+ +Signed-off-by: Laurent Pinchart +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c +index a814820..a192c51 100644 +--- a/drivers/media/video/uvc/uvc_driver.c ++++ b/drivers/media/video/uvc/uvc_driver.c +@@ -2168,6 +2168,15 @@ static struct usb_device_id uvc_ids[] = { + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_PROBE_MINMAX }, ++ /* Arkmicro unbranded */ ++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE ++ | USB_DEVICE_ID_MATCH_INT_INFO, ++ .idVendor = 0x18ec, ++ .idProduct = 0x3290, ++ .bInterfaceClass = USB_CLASS_VIDEO, ++ .bInterfaceSubClass = 1, ++ .bInterfaceProtocol = 0, ++ .driver_info = UVC_QUIRK_PROBE_DEF }, + /* Bodelin ProScopeHR */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_DEV_HI +From: Laurent Pinchart +Date: Wed, 31 Mar 2010 15:29:26 +0000 (-0300) +Subject: V4L/DVB: uvcvideo: Use POLLOUT and POLLWRNORM for output devices +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=a6eb7bc8e0eea78f96ad1b0f0195ec52b88c6a00 + +V4L/DVB: uvcvideo: Use POLLOUT and POLLWRNORM for output devices + +The V4L2 specification requires drivers to use the write events in the +file operations poll handler for output devices. The uvcvideo driver +erroneously used read events for all devices. Fix this. 
+ +Signed-off-by: Laurent Pinchart +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c +index 4a925a3..133c78d 100644 +--- a/drivers/media/video/uvc/uvc_queue.c ++++ b/drivers/media/video/uvc/uvc_queue.c +@@ -388,8 +388,12 @@ unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file, + + poll_wait(file, &buf->wait, wait); + if (buf->state == UVC_BUF_STATE_DONE || +- buf->state == UVC_BUF_STATE_ERROR) +- mask |= POLLIN | POLLRDNORM; ++ buf->state == UVC_BUF_STATE_ERROR) { ++ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ++ mask |= POLLIN | POLLRDNORM; ++ else ++ mask |= POLLOUT | POLLWRNORM; ++ } + + done: + mutex_unlock(&queue->mutex); +From: Laurent Pinchart +Date: Mon, 12 Apr 2010 13:41:22 +0000 (-0300) +Subject: V4L/DVB: uvcvideo: Add support for V4L2_PIX_FMT_Y16 +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=c947c3b45d8950312e3563a1a3316f910570d017 + +V4L/DVB: uvcvideo: Add support for V4L2_PIX_FMT_Y16 + +The Miricle 307K (17dc:0202) camera reports a 16-bit greyscale format, +support it in the driver. 
+ +Signed-off-by: Laurent Pinchart +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c +index 43892bf..4801a2a 100644 +--- a/drivers/media/video/uvc/uvc_driver.c ++++ b/drivers/media/video/uvc/uvc_driver.c +@@ -90,11 +90,16 @@ static struct uvc_format_desc uvc_fmts[] = { + .fcc = V4L2_PIX_FMT_UYVY, + }, + { +- .name = "Greyscale", ++ .name = "Greyscale (8-bit)", + .guid = UVC_GUID_FORMAT_Y800, + .fcc = V4L2_PIX_FMT_GREY, + }, + { ++ .name = "Greyscale (16-bit)", ++ .guid = UVC_GUID_FORMAT_Y16, ++ .fcc = V4L2_PIX_FMT_Y16, ++ }, ++ { + .name = "RGB Bayer", + .guid = UVC_GUID_FORMAT_BY8, + .fcc = V4L2_PIX_FMT_SBGGR8, +diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h +index 2bba059..d1f8840 100644 +--- a/drivers/media/video/uvc/uvcvideo.h ++++ b/drivers/media/video/uvc/uvcvideo.h +@@ -131,11 +131,13 @@ struct uvc_xu_control { + #define UVC_GUID_FORMAT_Y800 \ + { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} ++#define UVC_GUID_FORMAT_Y16 \ ++ { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \ ++ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} + #define UVC_GUID_FORMAT_BY8 \ + { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} + +- + /* ------------------------------------------------------------------------ + * Driver specific constants. + */ +From: Laurent Pinchart +Date: Sun, 25 Apr 2010 19:23:24 +0000 (-0300) +Subject: V4L/DVB: uvcvideo: Flag relative controls as write-only +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=4ac25db7e7f6116213f1c03039df00b18466a0dc + +V4L/DVB: uvcvideo: Flag relative controls as write-only + +The UVC relative controls (exposure time, iris, focus, zoom, pan/tilt) +are write-only (despite the UVC specification stating that the GET_CUR +request is mandatory). 
Mark the controls as such, and report the related +V4L2 controls V4L2_CTRL_FLAG_WRITE_ONLY. + +Signed-off-by: Laurent Pinchart +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c +index 3697d72..bf2a333 100644 +--- a/drivers/media/video/uvc/uvc_ctrl.c ++++ b/drivers/media/video/uvc/uvc_ctrl.c +@@ -216,8 +216,7 @@ static struct uvc_control_info uvc_ctrls[] = { + .selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL, + .index = 4, + .size = 1, +- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR +- | UVC_CONTROL_RESTORE, ++ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_RESTORE, + }, + { + .entity = UVC_GUID_UVC_CAMERA, +@@ -232,8 +231,9 @@ static struct uvc_control_info uvc_ctrls[] = { + .selector = UVC_CT_FOCUS_RELATIVE_CONTROL, + .index = 6, + .size = 2, +- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE +- | UVC_CONTROL_AUTO_UPDATE, ++ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN ++ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES ++ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE, + }, + { + .entity = UVC_GUID_UVC_CAMERA, +@@ -248,8 +248,7 @@ static struct uvc_control_info uvc_ctrls[] = { + .selector = UVC_CT_IRIS_RELATIVE_CONTROL, + .index = 8, + .size = 1, +- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR +- | UVC_CONTROL_AUTO_UPDATE, ++ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_AUTO_UPDATE, + }, + { + .entity = UVC_GUID_UVC_CAMERA, +@@ -264,8 +263,9 @@ static struct uvc_control_info uvc_ctrls[] = { + .selector = UVC_CT_ZOOM_RELATIVE_CONTROL, + .index = 10, + .size = 3, +- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE +- | UVC_CONTROL_AUTO_UPDATE, ++ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN ++ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES ++ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE, + }, + { + .entity = UVC_GUID_UVC_CAMERA, +@@ -280,8 +280,9 @@ static struct uvc_control_info uvc_ctrls[] = { + .selector = UVC_CT_PANTILT_RELATIVE_CONTROL, + .index = 12, + 
.size = 4, +- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE +- | UVC_CONTROL_AUTO_UPDATE, ++ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN ++ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES ++ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE, + }, + { + .entity = UVC_GUID_UVC_CAMERA, +@@ -296,8 +297,9 @@ static struct uvc_control_info uvc_ctrls[] = { + .selector = UVC_CT_ROLL_RELATIVE_CONTROL, + .index = 14, + .size = 2, +- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE +- | UVC_CONTROL_AUTO_UPDATE, ++ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN ++ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES ++ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE, + }, + { + .entity = UVC_GUID_UVC_CAMERA, +@@ -841,6 +843,8 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, + strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name); + v4l2_ctrl->flags = 0; + ++ if (!(ctrl->info->flags & UVC_CONTROL_GET_CUR)) ++ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY; + if (!(ctrl->info->flags & UVC_CONTROL_SET_CUR)) + v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; + +From: Laurent Pinchart +Date: Mon, 5 Jul 2010 18:24:39 +0000 (+0200) +Subject: uvcvideo: Power line frequency control doesn't support GET_MIN/MAX/RES +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=2596b09e32b45967dbdbfe80a10fb51d9a6c6839 + +uvcvideo: Power line frequency control doesn't support GET_MIN/MAX/RES + +Issuing a GET_MIN request on the power line frequency control times out +on at least the Apple iSight. As the UVC specification doesn't list +GET_MIN/MAX/RES as supported on that control, remove them from the +uvc_ctrls array. 
+ +Signed-off-by: Laurent Pinchart +--- + +diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c +index aa0720a..27a79f0 100644 +--- a/drivers/media/video/uvc/uvc_ctrl.c ++++ b/drivers/media/video/uvc/uvc_ctrl.c +@@ -122,8 +122,8 @@ static struct uvc_control_info uvc_ctrls[] = { + .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL, + .index = 10, + .size = 1, +- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE +- | UVC_CONTROL_RESTORE, ++ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR ++ | UVC_CONTROL_GET_DEF | UVC_CONTROL_RESTORE, + }, + { + .entity = UVC_GUID_UVC_PROCESSING, +From: Martin Rubli +Date: Wed, 19 May 2010 22:51:56 +0000 (+0200) +Subject: uvcvideo: Add support for absolute pan/tilt controls +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=d3c2f664ec76aff14c3841c99e84cd78d7227f79 + +uvcvideo: Add support for absolute pan/tilt controls + +Signed-off-by: Martin Rubli +--- + +diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c +index aa0720a..5ec2f4a 100644 +--- a/drivers/media/video/uvc/uvc_ctrl.c ++++ b/drivers/media/video/uvc/uvc_ctrl.c +@@ -606,6 +606,26 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = { + .set = uvc_ctrl_set_zoom, + }, + { ++ .id = V4L2_CID_PAN_ABSOLUTE, ++ .name = "Pan (Absolute)", ++ .entity = UVC_GUID_UVC_CAMERA, ++ .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, ++ .size = 32, ++ .offset = 0, ++ .v4l2_type = V4L2_CTRL_TYPE_INTEGER, ++ .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, ++ }, ++ { ++ .id = V4L2_CID_TILT_ABSOLUTE, ++ .name = "Tilt (Absolute)", ++ .entity = UVC_GUID_UVC_CAMERA, ++ .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, ++ .size = 32, ++ .offset = 32, ++ .v4l2_type = V4L2_CTRL_TYPE_INTEGER, ++ .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, ++ }, ++ { + .id = V4L2_CID_PRIVACY, + .name = "Privacy", + .entity = UVC_GUID_UVC_CAMERA, +From: Hans de Goede +Date: Wed, 19 May 2010 23:15:00 +0000 (+0200) +Subject: uvcvideo: Make 
button controls work properly +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=2bd47ad4894bfaf1a97660b821cbc46439a614d6 + +uvcvideo: Make button controls work properly + +According to the v4l2 spec, writing any value to a button control should +result in the action belonging to the button control being triggered. +UVC cams however want to see a 1 written, this patch fixes this by +overriding whatever value user space passed in with -1 (0xffffffff) when +the control is a button control. + +Signed-off-by: Hans de Goede +--- + +diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c +index 5ec2f4a..8bb825d 100644 +--- a/drivers/media/video/uvc/uvc_ctrl.c ++++ b/drivers/media/video/uvc/uvc_ctrl.c +@@ -698,6 +698,14 @@ static void uvc_set_le_value(struct uvc_control_mapping *mapping, + int offset = mapping->offset; + __u8 mask; + ++ /* According to the v4l2 spec, writing any value to a button control ++ * should result in the action belonging to the button control being ++ * triggered. UVC devices however want to see a 1 written -> override ++ * value. ++ */ ++ if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON) ++ value = -1; ++ + data += offset / 8; + offset &= 7; + +From: Laurent Pinchart +Date: Thu, 18 Feb 2010 19:38:52 +0000 (+0100) +Subject: uvcvideo: Support menu controls in the control mapping API +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=4930f2662e47d33e5baedac620da401a225bc3a8 + +uvcvideo: Support menu controls in the control mapping API + +The UVCIOC_CTRL_MAP ioctl doesn't support menu entries for menu +controls. As the uvc_xu_control_mapping structure has no reserved +fields, this can't be fixed while keeping ABI compatibility. + +Modify the UVCIOC_CTRL_MAP ioctl to add menu entries support, and define +UVCIOC_CTRL_MAP_OLD that supports the old ABI without any ability to add +menu controls. 
+ +Signed-off-by: Laurent Pinchart +--- + +diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c +index 8bb825d..c88d72e 100644 +--- a/drivers/media/video/uvc/uvc_ctrl.c ++++ b/drivers/media/video/uvc/uvc_ctrl.c +@@ -1606,6 +1606,28 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev) + } + } + ++void uvc_ctrl_cleanup(void) ++{ ++ struct uvc_control_info *info; ++ struct uvc_control_info *ni; ++ struct uvc_control_mapping *mapping; ++ struct uvc_control_mapping *nm; ++ ++ list_for_each_entry_safe(info, ni, &uvc_driver.controls, list) { ++ if (!(info->flags & UVC_CONTROL_EXTENSION)) ++ continue; ++ ++ list_for_each_entry_safe(mapping, nm, &info->mappings, list) { ++ list_del(&mapping->list); ++ kfree(mapping->menu_info); ++ kfree(mapping); ++ } ++ ++ list_del(&info->list); ++ kfree(info); ++ } ++} ++ + void uvc_ctrl_init(void) + { + struct uvc_control_info *ctrl = uvc_ctrls; +diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c +index 838b56f..34818c1 100644 +--- a/drivers/media/video/uvc/uvc_driver.c ++++ b/drivers/media/video/uvc/uvc_driver.c +@@ -2261,6 +2261,7 @@ static int __init uvc_init(void) + static void __exit uvc_cleanup(void) + { + usb_deregister(&uvc_driver.driver); ++ uvc_ctrl_cleanup(); + } + + module_init(uvc_init); +diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c +index 7c9ab29..485a899 100644 +--- a/drivers/media/video/uvc/uvc_v4l2.c ++++ b/drivers/media/video/uvc/uvc_v4l2.c +@@ -29,6 +29,71 @@ + #include "uvcvideo.h" + + /* ------------------------------------------------------------------------ ++ * UVC ioctls ++ */ ++static int uvc_ioctl_ctrl_map(struct uvc_xu_control_mapping *xmap, int old) ++{ ++ struct uvc_control_mapping *map; ++ unsigned int size; ++ int ret; ++ ++ map = kzalloc(sizeof *map, GFP_KERNEL); ++ if (map == NULL) ++ return -ENOMEM; ++ ++ map->id = xmap->id; ++ memcpy(map->name, xmap->name, sizeof map->name); ++ 
memcpy(map->entity, xmap->entity, sizeof map->entity); ++ map->selector = xmap->selector; ++ map->size = xmap->size; ++ map->offset = xmap->offset; ++ map->v4l2_type = xmap->v4l2_type; ++ map->data_type = xmap->data_type; ++ ++ switch (xmap->v4l2_type) { ++ case V4L2_CTRL_TYPE_INTEGER: ++ case V4L2_CTRL_TYPE_BOOLEAN: ++ case V4L2_CTRL_TYPE_BUTTON: ++ break; ++ ++ case V4L2_CTRL_TYPE_MENU: ++ if (old) { ++ ret = -EINVAL; ++ goto done; ++ } ++ ++ size = xmap->menu_count * sizeof(*map->menu_info); ++ map->menu_info = kmalloc(size, GFP_KERNEL); ++ if (map->menu_info == NULL) { ++ ret = -ENOMEM; ++ goto done; ++ } ++ ++ if (copy_from_user(map->menu_info, xmap->menu_info, size)) { ++ ret = -EFAULT; ++ goto done; ++ } ++ ++ map->menu_count = xmap->menu_count; ++ break; ++ ++ default: ++ ret = -EINVAL; ++ goto done; ++ } ++ ++ ret = uvc_ctrl_add_mapping(map); ++ ++done: ++ if (ret < 0) { ++ kfree(map->menu_info); ++ kfree(map); ++ } ++ ++ return ret; ++} ++ ++/* ------------------------------------------------------------------------ + * V4L2 interface + */ + +@@ -974,7 +1039,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) + info->flags = xinfo->flags; + + info->flags |= UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX | +- UVC_CONTROL_GET_RES | UVC_CONTROL_GET_DEF; ++ UVC_CONTROL_GET_RES | UVC_CONTROL_GET_DEF | ++ UVC_CONTROL_EXTENSION; + + ret = uvc_ctrl_add_info(info); + if (ret < 0) +@@ -982,32 +1048,12 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) + break; + } + ++ case UVCIOC_CTRL_MAP_OLD: + case UVCIOC_CTRL_MAP: +- { +- struct uvc_xu_control_mapping *xmap = arg; +- struct uvc_control_mapping *map; +- + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + +- map = kzalloc(sizeof *map, GFP_KERNEL); +- if (map == NULL) +- return -ENOMEM; +- +- map->id = xmap->id; +- memcpy(map->name, xmap->name, sizeof map->name); +- memcpy(map->entity, xmap->entity, sizeof map->entity); +- map->selector = xmap->selector; +- 
map->size = xmap->size; +- map->offset = xmap->offset; +- map->v4l2_type = xmap->v4l2_type; +- map->data_type = xmap->data_type; +- +- ret = uvc_ctrl_add_mapping(map); +- if (ret < 0) +- kfree(map); +- break; +- } ++ return uvc_ioctl_ctrl_map(arg, cmd == UVCIOC_CTRL_MAP_OLD); + + case UVCIOC_CTRL_GET: + return uvc_xu_ctrl_query(chain, arg, 0); +diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h +index d1f8840..14f77e4 100644 +--- a/drivers/media/video/uvc/uvcvideo.h ++++ b/drivers/media/video/uvc/uvcvideo.h +@@ -27,6 +27,8 @@ + #define UVC_CONTROL_RESTORE (1 << 6) + /* Control can be updated by the camera. */ + #define UVC_CONTROL_AUTO_UPDATE (1 << 7) ++/* Control is an extension unit control. */ ++#define UVC_CONTROL_EXTENSION (1 << 8) + + #define UVC_CONTROL_GET_RANGE (UVC_CONTROL_GET_CUR | UVC_CONTROL_GET_MIN | \ + UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES | \ +@@ -40,6 +42,15 @@ struct uvc_xu_control_info { + __u32 flags; + }; + ++struct uvc_menu_info { ++ __u32 value; ++ __u8 name[32]; ++}; ++ ++struct uvc_xu_control_mapping_old { ++ __u8 reserved[64]; ++}; ++ + struct uvc_xu_control_mapping { + __u32 id; + __u8 name[32]; +@@ -50,6 +61,11 @@ struct uvc_xu_control_mapping { + __u8 offset; + enum v4l2_ctrl_type v4l2_type; + __u32 data_type; ++ ++ struct uvc_menu_info __user *menu_info; ++ __u32 menu_count; ++ ++ __u32 reserved[4]; + }; + + struct uvc_xu_control { +@@ -60,6 +76,7 @@ struct uvc_xu_control { + }; + + #define UVCIOC_CTRL_ADD _IOW('U', 1, struct uvc_xu_control_info) ++#define UVCIOC_CTRL_MAP_OLD _IOWR('U', 2, struct uvc_xu_control_mapping_old) + #define UVCIOC_CTRL_MAP _IOWR('U', 2, struct uvc_xu_control_mapping) + #define UVCIOC_CTRL_GET _IOWR('U', 3, struct uvc_xu_control) + #define UVCIOC_CTRL_SET _IOW('U', 4, struct uvc_xu_control) +@@ -198,11 +215,6 @@ struct uvc_streaming_control { + __u8 bMaxVersion; + }; + +-struct uvc_menu_info { +- __u32 value; +- __u8 name[32]; +-}; +- + struct uvc_control_info { + struct 
list_head list; + struct list_head mappings; +@@ -625,6 +637,7 @@ extern int uvc_ctrl_init_device(struct uvc_device *dev); + extern void uvc_ctrl_cleanup_device(struct uvc_device *dev); + extern int uvc_ctrl_resume_device(struct uvc_device *dev); + extern void uvc_ctrl_init(void); ++extern void uvc_ctrl_cleanup(void); + + extern int uvc_ctrl_begin(struct uvc_video_chain *chain); + extern int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback); +From: Laurent Pinchart +Date: Fri, 25 Jun 2010 07:58:43 +0000 (+0200) +Subject: uvcvideo: Add support for Manta MM-353 Plako +X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=352e661e1f347390a86cf34bc5e41adbdd1caa41 + +uvcvideo: Add support for Manta MM-353 Plako + +The camera requires the PROBE_MINMAX quirk. Add a corresponding entry +in the device IDs list + +Signed-off-by: Laurent Pinchart +--- + +diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c +index 34818c1..1a89384 100644 +--- a/drivers/media/video/uvc/uvc_driver.c ++++ b/drivers/media/video/uvc/uvc_driver.c +@@ -2174,6 +2174,15 @@ static struct usb_device_id uvc_ids[] = { + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_PROBE_EXTRAFIELDS }, ++ /* Manta MM-353 Plako */ ++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE ++ | USB_DEVICE_ID_MATCH_INT_INFO, ++ .idVendor = 0x18ec, ++ .idProduct = 0x3188, ++ .bInterfaceClass = USB_CLASS_VIDEO, ++ .bInterfaceSubClass = 1, ++ .bInterfaceProtocol = 0, ++ .driver_info = UVC_QUIRK_PROBE_MINMAX }, + /* FSC WebCam V30S */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/linux-2.6-vio-modalias.patch b/linux-2.6-vio-modalias.patch new file mode 100644 index 000000000..2d56d6e36 --- /dev/null +++ b/linux-2.6-vio-modalias.patch @@ -0,0 +1,61 @@ +From: Benjamin Herrenschmidt +Date: Wed, 7 Apr 2010 04:44:28 +0000 (+1000) +Subject: powerpc/vio: Add modalias support +X-Git-Tag: 
v2.6.35-rc1~450^2~88 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=578b7cd1518f8d1b17a7fb1671d3d756c9cb49f1 + +powerpc/vio: Add modalias support + +BenH: Added to vio_cmo_dev_attrs as well + +Provide a modalias entry for VIO devices in sysfs. I believe +this was another initrd generation bugfix for anaconda. +Signed-off-by: David Woodhouse +Signed-off-by: Benjamin Herrenschmidt +--- + +diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c +index 8223717..2f57956 100644 +--- a/arch/powerpc/kernel/vio.c ++++ b/arch/powerpc/kernel/vio.c +@@ -958,9 +958,12 @@ viodev_cmo_rd_attr(allocated); + + static ssize_t name_show(struct device *, struct device_attribute *, char *); + static ssize_t devspec_show(struct device *, struct device_attribute *, char *); ++static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, ++ char *buf); + static struct device_attribute vio_cmo_dev_attrs[] = { + __ATTR_RO(name), + __ATTR_RO(devspec), ++ __ATTR_RO(modalias), + __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, + viodev_cmo_desired_show, viodev_cmo_desired_set), + __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), +@@ -1320,9 +1323,27 @@ static ssize_t devspec_show(struct device *dev, + return sprintf(buf, "%s\n", of_node ? 
of_node->full_name : "none"); + } + ++static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ const struct vio_dev *vio_dev = to_vio_dev(dev); ++ struct device_node *dn; ++ const char *cp; ++ ++ dn = dev->archdata.of_node; ++ if (!dn) ++ return -ENODEV; ++ cp = of_get_property(dn, "compatible", NULL); ++ if (!cp) ++ return -ENODEV; ++ ++ return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); ++} ++ + static struct device_attribute vio_dev_attrs[] = { + __ATTR_RO(name), + __ATTR_RO(devspec), ++ __ATTR_RO(modalias), + __ATTR_NULL + }; + diff --git a/linux-2.6-x86-cfi_sections.patch b/linux-2.6-x86-cfi_sections.patch new file mode 100644 index 000000000..646d4fea6 --- /dev/null +++ b/linux-2.6-x86-cfi_sections.patch @@ -0,0 +1,60 @@ +From 9e565292270a2d55524be38835104c564ac8f795 Mon Sep 17 00:00:00 2001 +From: Roland McGrath +Date: Thu, 13 May 2010 21:43:03 -0700 +Subject: [PATCH] x86: Use .cfi_sections for assembly code + +The newer assemblers support the .cfi_sections directive so we can put +the CFI from .S files into the .debug_frame section that is preserved +in unstripped vmlinux and in separate debuginfo, rather than the +.eh_frame section that is now discarded by vmlinux.lds.S. + +Signed-off-by: Roland McGrath +LKML-Reference: <20100514044303.A6FE7400BE@magilla.sf.frob.com> +Signed-off-by: H. Peter Anvin +--- + arch/x86/Makefile | 5 +++-- + arch/x86/include/asm/dwarf2.h | 12 ++++++++++++ + 2 files changed, 15 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 0a43dc5..8aa1b59 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -95,8 +95,9 @@ sp-$(CONFIG_X86_64) := rsp + cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1) + # is .cfi_signal_frame supported too? 
+ cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) +-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) +-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) ++cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) ++KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) ++KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) + + LDFLAGS := -m elf_$(UTS_MACHINE) + +diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h +index ae6253a..733f7e9 100644 +--- a/arch/x86/include/asm/dwarf2.h ++++ b/arch/x86/include/asm/dwarf2.h +@@ -34,6 +34,18 @@ + #define CFI_SIGNAL_FRAME + #endif + ++#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__) ++ /* ++ * Emit CFI data in .debug_frame sections, not .eh_frame sections. ++ * The latter we currently just discard since we don't do DWARF ++ * unwinding at runtime. So only the offline DWARF information is ++ * useful to anyone. Note we should not use this directive if this ++ * file is used in the vDSO assembly, or if vmlinux.lds.S gets ++ * changed so it doesn't discard .eh_frame. 
++ */ ++ .cfi_sections .debug_frame ++#endif ++ + #else + + /* +-- +1.7.0.1 + diff --git a/linux-2.6.29-sparc-IOC_TYPECHECK.patch b/linux-2.6.29-sparc-IOC_TYPECHECK.patch new file mode 100644 index 000000000..d73c30adc --- /dev/null +++ b/linux-2.6.29-sparc-IOC_TYPECHECK.patch @@ -0,0 +1,21 @@ +diff -up vanilla-2.6.29-rc7-git2/arch/sparc/include/asm/ioctl.h.BAD vanilla-2.6.29-rc7-git2/arch/sparc/include/asm/ioctl.h +--- vanilla-2.6.29-rc7-git2/arch/sparc/include/asm/ioctl.h.BAD 2009-03-09 17:01:32.000000000 -0400 ++++ vanilla-2.6.29-rc7-git2/arch/sparc/include/asm/ioctl.h 2009-03-09 16:52:27.000000000 -0400 +@@ -41,6 +41,17 @@ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT)) + ++#ifdef __KERNEL__ ++/* provoke compile error for invalid uses of size argument */ ++extern unsigned int __invalid_size_argument_for_IOC; ++#define _IOC_TYPECHECK(t) \ ++ ((sizeof(t) == sizeof(t[1]) && \ ++ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ ++ sizeof(t) : __invalid_size_argument_for_IOC) ++#else ++#define _IOC_TYPECHECK(t) (sizeof(t)) ++#endif ++ + #define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) + #define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) + #define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) diff --git a/linux-2.6.30-no-pcspkr-modalias.patch b/linux-2.6.30-no-pcspkr-modalias.patch new file mode 100644 index 000000000..c703b8844 --- /dev/null +++ b/linux-2.6.30-no-pcspkr-modalias.patch @@ -0,0 +1,11 @@ +diff -up linux-2.6.30.noarch/drivers/input/misc/pcspkr.c.jx linux-2.6.30.noarch/drivers/input/misc/pcspkr.c +--- linux-2.6.30.noarch/drivers/input/misc/pcspkr.c.jx 2009-07-28 16:54:44.000000000 -0400 ++++ linux-2.6.30.noarch/drivers/input/misc/pcspkr.c 2009-07-28 16:59:36.000000000 -0400 +@@ -23,7 +23,6 @@ + MODULE_AUTHOR("Vojtech Pavlik "); + MODULE_DESCRIPTION("PC Speaker beeper driver"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("platform:pcspkr"); + + #if defined(CONFIG_MIPS) || defined(CONFIG_X86) + /* Use the global PIT lock ! 
*/ diff --git a/lirc-2.6.33.patch b/lirc-2.6.33.patch new file mode 100644 index 000000000..0aab0c725 --- /dev/null +++ b/lirc-2.6.33.patch @@ -0,0 +1,16918 @@ + include/linux/lirc.h | 94 ++ + drivers/input/Kconfig | 2 + + drivers/input/Makefile | 2 + + drivers/input/lirc/Kconfig | 116 ++ + drivers/input/lirc/Makefile | 21 + + drivers/input/lirc/lirc_bt829.c | 383 ++++++ + drivers/input/lirc/lirc_dev.c | 736 ++++++++++ + drivers/input/lirc/lirc_dev.h | 225 +++ + drivers/input/lirc/lirc_ene0100.c | 646 +++++++++ + drivers/input/lirc/lirc_ene0100.h | 169 +++ + drivers/input/lirc/lirc_i2c.c | 536 ++++++++ + drivers/input/lirc/lirc_igorplugusb.c | 556 ++++++++ + drivers/input/lirc/lirc_imon.c | 1054 ++++++++++++++ + drivers/input/lirc/lirc_it87.c | 991 ++++++++++++++ + drivers/input/lirc/lirc_it87.h | 116 ++ + drivers/input/lirc/lirc_ite8709.c | 540 ++++++++ + drivers/input/lirc/lirc_mceusb.c | 1222 +++++++++++++++++ + drivers/input/lirc/lirc_parallel.c | 709 ++++++++++ + drivers/input/lirc/lirc_parallel.h | 26 + + drivers/input/lirc/lirc_sasem.c | 931 +++++++++++++ + drivers/input/lirc/lirc_serial.c | 1317 ++++++++++++++++++ + drivers/input/lirc/lirc_sir.c | 1283 +++++++++++++++++ + drivers/input/lirc/lirc_streamzap.c | 794 +++++++++++ + drivers/input/lirc/lirc_ttusbir.c | 397 ++++++ + drivers/input/lirc/lirc_zilog.c | 1396 +++++++++++++++++++ + drivers/input/misc/Kconfig | 12 + + drivers/input/misc/Makefile | 1 + + drivers/input/misc/imon.c | 2430 +++++++++++++++++++++++++++++++++ + 28 files changed, 16705 insertions(+), 0 deletions(-) + +diff --git a/include/linux/lirc.h b/include/linux/lirc.h +new file mode 100644 +index 0000000..8ae64fa +--- /dev/null ++++ b/include/linux/lirc.h +@@ -0,0 +1,94 @@ ++/* ++ * lirc.h - linux infrared remote control header file ++ * last modified 2007/09/27 ++ */ ++ ++#ifndef _LINUX_LIRC_H ++#define _LINUX_LIRC_H ++ ++#include ++#include ++ ++#define PULSE_BIT 0x01000000 ++#define PULSE_MASK 0x00FFFFFF ++ ++/*** lirc compatible 
hardware features ***/ ++ ++#define LIRC_MODE2SEND(x) (x) ++#define LIRC_SEND2MODE(x) (x) ++#define LIRC_MODE2REC(x) ((x) << 16) ++#define LIRC_REC2MODE(x) ((x) >> 16) ++ ++#define LIRC_MODE_RAW 0x00000001 ++#define LIRC_MODE_PULSE 0x00000002 ++#define LIRC_MODE_MODE2 0x00000004 ++#define LIRC_MODE_LIRCCODE 0x00000010 ++ ++ ++#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) ++#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) ++#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) ++#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) ++ ++#define LIRC_CAN_SEND_MASK 0x0000003f ++ ++#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 ++#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 ++#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 ++ ++#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) ++#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) ++#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) ++#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) ++ ++#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) ++ ++#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) ++#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) ++ ++#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 ++#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 ++#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 ++ ++#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) ++#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) ++ ++#define LIRC_CAN_NOTIFY_DECODE 0x01000000 ++ ++/*** IOCTL commands for lirc driver ***/ ++ ++#define LIRC_GET_FEATURES _IOR('i', 0x00000000, uint64_t) ++ ++#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, uint64_t) ++#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, uint64_t) ++#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, uint32_t) ++#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, uint32_t) ++#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, uint32_t) ++#define LIRC_GET_REC_DUTY_CYCLE 
_IOR('i', 0x00000006, uint32_t) ++#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, uint32_t) ++ ++/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ ++#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, uint64_t) ++ ++#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, uint64_t) ++#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, uint64_t) ++/* Note: these can reset the according pulse_width */ ++#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, uint32_t) ++#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, uint32_t) ++#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, uint32_t) ++#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, uint32_t) ++#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, uint32_t) ++ ++/* ++ * to set a range use ++ * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the ++ * lower bound first and later ++ * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound ++ */ ++ ++#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, uint32_t) ++#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, uint32_t) ++ ++#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) ++ ++#endif +diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig +index 07c2cd4..ebc8743 100644 +--- a/drivers/input/Kconfig ++++ b/drivers/input/Kconfig +@@ -183,6 +183,8 @@ source "drivers/input/tablet/Kconfig" + + source "drivers/input/touchscreen/Kconfig" + ++source "drivers/input/lirc/Kconfig" ++ + source "drivers/input/misc/Kconfig" + + endif +diff --git a/drivers/input/Makefile b/drivers/input/Makefile +index 7ad212d..cb119e7 100644 +--- a/drivers/input/Makefile ++++ b/drivers/input/Makefile +@@ -26,3 +26,5 @@ obj-$(CONFIG_INPUT_MISC) += misc/ + obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o + + obj-$(CONFIG_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o ++ ++obj-$(CONFIG_INPUT_LIRC) += lirc/ +diff --git a/drivers/input/lirc/Kconfig b/drivers/input/lirc/Kconfig +new file mode 100644 +index 0000000..86fc063 +--- /dev/null ++++ 
b/drivers/input/lirc/Kconfig +@@ -0,0 +1,116 @@ ++# ++# LIRC driver(s) configuration ++# ++menuconfig INPUT_LIRC ++ tristate "Linux Infrared Remote Control IR receiver/transmitter drivers" ++ help ++ Say Y here, and all supported Linux Infrared Remote Control IR and ++ RF receiver and transmitter drivers will be displayed. When paired ++ with a remote control and the lirc daemon, the receiver drivers ++ allow control of your Linux system via remote control. ++ ++if INPUT_LIRC ++ ++config LIRC_BT829 ++ tristate "BT829 based hardware" ++ depends on INPUT_LIRC ++ help ++ Driver for the IR interface on BT829-based hardware ++ ++config LIRC_ENE0100 ++ tristate "ENE KB3924/ENE0100 CIR Port Reciever" ++ depends on INPUT_LIRC ++ help ++ This is a driver for CIR port handled by ENE KB3924 embedded ++ controller found on some notebooks. ++ It appears on PNP list as ENE0100. ++ ++config LIRC_I2C ++ tristate "I2C Based IR Receivers" ++ depends on INPUT_LIRC ++ help ++ Driver for I2C-based IR receivers, such as those commonly ++ found onboard Hauppauge PVR-150/250/350 video capture cards ++ ++config LIRC_IGORPLUGUSB ++ tristate "Igor Cesko's USB IR Receiver" ++ depends on INPUT_LIRC && USB ++ help ++ Driver for Igor Cesko's USB IR Receiver ++ ++config LIRC_IMON ++ tristate "Legacy SoundGraph iMON Receiver and Display" ++ depends on INPUT_LIRC ++ help ++ Driver for the original SoundGraph iMON IR Receiver and Display ++ ++ Current generation iMON devices use the input layer imon driver. ++ ++config LIRC_IT87 ++ tristate "ITE IT87XX CIR Port Receiver" ++ depends on INPUT_LIRC ++ help ++ Driver for the ITE IT87xx IR Receiver ++ ++config LIRC_ITE8709 ++ tristate "ITE8709 CIR Port Receiver" ++ depends on INPUT_LIRC && PNP ++ help ++ Driver for the ITE8709 IR Receiver ++ ++config LIRC_MCEUSB ++ tristate "Windows Media Center Ed. USB IR Transceiver" ++ depends on INPUT_LIRC && USB ++ help ++ Driver for Windows Media Center Ed. 
USB IR Transceivers ++ ++config LIRC_PARALLEL ++ tristate "Homebrew Parallel Port Receiver" ++ depends on INPUT_LIRC && !SMP ++ help ++ Driver for Homebrew Parallel Port Receivers ++ ++config LIRC_SASEM ++ tristate "Sasem USB IR Remote" ++ depends on INPUT_LIRC ++ help ++ Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module ++ ++config LIRC_SERIAL ++ tristate "Homebrew Serial Port Receiver" ++ depends on INPUT_LIRC ++ help ++ Driver for Homebrew Serial Port Receivers ++ ++config LIRC_SERIAL_TRANSMITTER ++ bool "Serial Port Transmitter" ++ default y ++ depends on LIRC_SERIAL ++ help ++ Serial Port Transmitter support ++ ++config LIRC_SIR ++ tristate "Built-in SIR IrDA port" ++ depends on INPUT_LIRC ++ help ++ Driver for the SIR IrDA port ++ ++config LIRC_STREAMZAP ++ tristate "Streamzap PC Receiver" ++ depends on INPUT_LIRC ++ help ++ Driver for the Streamzap PC Receiver ++ ++config LIRC_TTUSBIR ++ tristate "Technotrend USB IR Receiver" ++ depends on INPUT_LIRC && USB ++ help ++ Driver for the Technotrend USB IR Receiver ++ ++config LIRC_ZILOG ++ tristate "Zilog/Hauppauge IR Transmitter" ++ depends on INPUT_LIRC ++ help ++ Driver for the Zilog/Hauppauge IR Transmitter, found on ++ PVR-150/500, HVR-1200/1250/1700/1800, HD-PVR and other cards ++endif +diff --git a/drivers/input/lirc/Makefile b/drivers/input/lirc/Makefile +new file mode 100644 +index 0000000..9122e87 +--- /dev/null ++++ b/drivers/input/lirc/Makefile +@@ -0,0 +1,21 @@ ++# Makefile for the lirc drivers. ++# ++ ++# Each configuration option enables a list of files. 
++ ++obj-$(CONFIG_INPUT_LIRC) += lirc_dev.o ++obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o ++obj-$(CONFIG_LIRC_ENE0100) += lirc_ene0100.o ++obj-$(CONFIG_LIRC_I2C) += lirc_i2c.o ++obj-$(CONFIG_LIRC_IGORPLUGUSB) += lirc_igorplugusb.o ++obj-$(CONFIG_LIRC_IMON) += lirc_imon.o ++obj-$(CONFIG_LIRC_IT87) += lirc_it87.o ++obj-$(CONFIG_LIRC_ITE8709) += lirc_ite8709.o ++obj-$(CONFIG_LIRC_MCEUSB) += lirc_mceusb.o ++obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o ++obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o ++obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o ++obj-$(CONFIG_LIRC_SIR) += lirc_sir.o ++obj-$(CONFIG_LIRC_STREAMZAP) += lirc_streamzap.o ++obj-$(CONFIG_LIRC_TTUSBIR) += lirc_ttusbir.o ++obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o +diff --git a/drivers/input/lirc/lirc_bt829.c b/drivers/input/lirc/lirc_bt829.c +new file mode 100644 +index 0000000..0485884 +--- /dev/null ++++ b/drivers/input/lirc/lirc_bt829.c +@@ -0,0 +1,383 @@ ++/* ++ * Remote control driver for the TV-card based on bt829 ++ * ++ * by Leonid Froenchenko ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc_dev.h" ++ ++static int poll_main(void); ++static int atir_init_start(void); ++ ++static void write_index(unsigned char index, unsigned int value); ++static unsigned int read_index(unsigned char index); ++ ++static void do_i2c_start(void); ++static void do_i2c_stop(void); ++ ++static void seems_wr_byte(unsigned char al); ++static unsigned char seems_rd_byte(void); ++ ++static unsigned int read_index(unsigned char al); ++static void write_index(unsigned char ah, unsigned int edx); ++ ++static void cycle_delay(int cycle); ++ ++static void do_set_bits(unsigned char bl); ++static unsigned char do_get_bits(void); ++ ++#define DATA_PCI_OFF 0x7FFC00 ++#define WAIT_CYCLE 20 ++ ++#define DRIVER_NAME "lirc_bt829" ++ ++static int debug; ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG DRIVER_NAME ": "fmt, ## args); \ ++ } while (0) ++ ++static int atir_minor; ++static unsigned long pci_addr_phys; ++static unsigned char *pci_addr_lin; ++ ++static struct lirc_driver atir_driver; ++ ++static struct pci_dev *do_pci_probe(void) ++{ ++ struct pci_dev *my_dev; ++ my_dev = pci_get_device(PCI_VENDOR_ID_ATI, ++ PCI_DEVICE_ID_ATI_264VT, NULL); ++ if (my_dev) { ++ printk(KERN_ERR DRIVER_NAME ": Using device: %s\n", ++ pci_name(my_dev)); ++ pci_addr_phys = 0; ++ if (my_dev->resource[0].flags & IORESOURCE_MEM) { ++ pci_addr_phys = my_dev->resource[0].start; ++ printk(KERN_INFO DRIVER_NAME ": memory at 0x%08X \n", ++ (unsigned int)pci_addr_phys); ++ } ++ if (pci_addr_phys == 0) { ++ printk(KERN_ERR DRIVER_NAME ": no memory resource ?\n"); ++ return NULL; ++ } ++ } else { ++ printk(KERN_ERR DRIVER_NAME ": pci_probe failed\n"); ++ return NULL; ++ } ++ return my_dev; ++} ++ ++static int atir_add_to_buf(void *data, struct lirc_buffer *buf) ++{ ++ unsigned char key; ++ int status; ++ status = poll_main(); ++ key = (status >> 8) & 0xFF; ++ if (status & 0xFF) { ++ dprintk("reading key %02X\n", key); ++ lirc_buffer_write(buf, &key); ++ return 0; ++ } ++ return -ENODATA; ++} ++ ++static int atir_set_use_inc(void *data) ++{ ++ dprintk("driver is opened\n"); ++ return 0; ++} ++ ++static void atir_set_use_dec(void *data) ++{ ++ dprintk("driver is closed\n"); ++} ++ ++int init_module(void) ++{ ++ struct pci_dev *pdev; ++ ++ pdev = do_pci_probe(); ++ if (pdev == NULL) ++ return 1; ++ ++ if (!atir_init_start()) ++ return 1; ++ ++ strcpy(atir_driver.name, "ATIR"); ++ atir_driver.minor = -1; ++ atir_driver.code_length = 8; ++ atir_driver.sample_rate = 10; ++ atir_driver.data = 0; ++ atir_driver.add_to_buf = atir_add_to_buf; ++ atir_driver.set_use_inc = atir_set_use_inc; ++ atir_driver.set_use_dec = atir_set_use_dec; ++ atir_driver.dev = &pdev->dev; ++ atir_driver.owner = THIS_MODULE; ++ ++ atir_minor = 
lirc_register_driver(&atir_driver); ++ if (atir_minor < 0) { ++ printk(KERN_ERR DRIVER_NAME ": failed to register driver!\n"); ++ return atir_minor; ++ } ++ dprintk("driver is registered on minor %d\n", atir_minor); ++ ++ return 0; ++} ++ ++ ++void cleanup_module(void) ++{ ++ lirc_unregister_driver(atir_minor); ++} ++ ++ ++static int atir_init_start(void) ++{ ++ pci_addr_lin = ioremap(pci_addr_phys + DATA_PCI_OFF, 0x400); ++ if (pci_addr_lin == 0) { ++ printk(KERN_INFO DRIVER_NAME ": pci mem must be mapped\n"); ++ return 0; ++ } ++ return 1; ++} ++ ++static void cycle_delay(int cycle) ++{ ++ udelay(WAIT_CYCLE*cycle); ++} ++ ++ ++static int poll_main() ++{ ++ unsigned char status_high, status_low; ++ ++ do_i2c_start(); ++ ++ seems_wr_byte(0xAA); ++ seems_wr_byte(0x01); ++ ++ do_i2c_start(); ++ ++ seems_wr_byte(0xAB); ++ ++ status_low = seems_rd_byte(); ++ status_high = seems_rd_byte(); ++ ++ do_i2c_stop(); ++ ++ return (status_high << 8) | status_low; ++} ++ ++static void do_i2c_start(void) ++{ ++ do_set_bits(3); ++ cycle_delay(4); ++ ++ do_set_bits(1); ++ cycle_delay(7); ++ ++ do_set_bits(0); ++ cycle_delay(2); ++} ++ ++static void do_i2c_stop(void) ++{ ++ unsigned char bits; ++ bits = do_get_bits() & 0xFD; ++ do_set_bits(bits); ++ cycle_delay(1); ++ ++ bits |= 1; ++ do_set_bits(bits); ++ cycle_delay(2); ++ ++ bits |= 2; ++ do_set_bits(bits); ++ bits = 3; ++ do_set_bits(bits); ++ cycle_delay(2); ++} ++ ++static void seems_wr_byte(unsigned char value) ++{ ++ int i; ++ unsigned char reg; ++ ++ reg = do_get_bits(); ++ for (i = 0; i < 8; i++) { ++ if (value & 0x80) ++ reg |= 0x02; ++ else ++ reg &= 0xFD; ++ ++ do_set_bits(reg); ++ cycle_delay(1); ++ ++ reg |= 1; ++ do_set_bits(reg); ++ cycle_delay(1); ++ ++ reg &= 0xFE; ++ do_set_bits(reg); ++ cycle_delay(1); ++ value <<= 1; ++ } ++ cycle_delay(2); ++ ++ reg |= 2; ++ do_set_bits(reg); ++ ++ reg |= 1; ++ do_set_bits(reg); ++ ++ cycle_delay(1); ++ do_get_bits(); ++ ++ reg &= 0xFE; ++ do_set_bits(reg); ++ cycle_delay(3); 
++} ++ ++static unsigned char seems_rd_byte(void) ++{ ++ int i; ++ int rd_byte; ++ unsigned char bits_2, bits_1; ++ ++ bits_1 = do_get_bits() | 2; ++ do_set_bits(bits_1); ++ ++ rd_byte = 0; ++ for (i = 0; i < 8; i++) { ++ bits_1 &= 0xFE; ++ do_set_bits(bits_1); ++ cycle_delay(2); ++ ++ bits_1 |= 1; ++ do_set_bits(bits_1); ++ cycle_delay(1); ++ ++ bits_2 = do_get_bits(); ++ if (bits_2 & 2) ++ rd_byte |= 1; ++ ++ rd_byte <<= 1; ++ } ++ ++ bits_1 = 0; ++ if (bits_2 == 0) ++ bits_1 |= 2; ++ ++ do_set_bits(bits_1); ++ cycle_delay(2); ++ ++ bits_1 |= 1; ++ do_set_bits(bits_1); ++ cycle_delay(3); ++ ++ bits_1 &= 0xFE; ++ do_set_bits(bits_1); ++ cycle_delay(2); ++ ++ rd_byte >>= 1; ++ rd_byte &= 0xFF; ++ return rd_byte; ++} ++ ++static void do_set_bits(unsigned char new_bits) ++{ ++ int reg_val; ++ reg_val = read_index(0x34); ++ if (new_bits & 2) { ++ reg_val &= 0xFFFFFFDF; ++ reg_val |= 1; ++ } else { ++ reg_val &= 0xFFFFFFFE; ++ reg_val |= 0x20; ++ } ++ reg_val |= 0x10; ++ write_index(0x34, reg_val); ++ ++ reg_val = read_index(0x31); ++ if (new_bits & 1) ++ reg_val |= 0x1000000; ++ else ++ reg_val &= 0xFEFFFFFF; ++ ++ reg_val |= 0x8000000; ++ write_index(0x31, reg_val); ++} ++ ++static unsigned char do_get_bits(void) ++{ ++ unsigned char bits; ++ int reg_val; ++ ++ reg_val = read_index(0x34); ++ reg_val |= 0x10; ++ reg_val &= 0xFFFFFFDF; ++ write_index(0x34, reg_val); ++ ++ reg_val = read_index(0x34); ++ bits = 0; ++ if (reg_val & 8) ++ bits |= 2; ++ else ++ bits &= 0xFD; ++ ++ reg_val = read_index(0x31); ++ if (reg_val & 0x1000000) ++ bits |= 1; ++ else ++ bits &= 0xFE; ++ ++ return bits; ++} ++ ++static unsigned int read_index(unsigned char index) ++{ ++ unsigned char *addr; ++ unsigned int value; ++ /* addr = pci_addr_lin + DATA_PCI_OFF + ((index & 0xFF) << 2); */ ++ addr = pci_addr_lin + ((index & 0xFF) << 2); ++ value = readl(addr); ++ return value; ++} ++ ++static void write_index(unsigned char index, unsigned int reg_val) ++{ ++ unsigned char *addr; ++ addr = 
pci_addr_lin + ((index & 0xFF) << 2); ++ writel(reg_val, addr); ++} ++ ++MODULE_AUTHOR("Froenchenko Leonid"); ++MODULE_DESCRIPTION("IR remote driver for bt829 based TV cards"); ++MODULE_LICENSE("GPL"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Debug enabled or not"); +diff --git a/drivers/input/lirc/lirc_dev.c b/drivers/input/lirc/lirc_dev.c +new file mode 100644 +index 0000000..504e122 +--- /dev/null ++++ b/drivers/input/lirc/lirc_dev.c +@@ -0,0 +1,736 @@ ++/* ++ * LIRC base driver ++ * ++ * by Artur Lipowski ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++static int debug; ++ ++#define IRCTL_DEV_NAME "BaseRemoteCtl" ++#define NOPLUG -1 ++#define LOGHEAD "lirc_dev (%s[%d]): " ++ ++static dev_t lirc_base_dev; ++ ++struct irctl { ++ struct lirc_driver d; ++ int attached; ++ int open; ++ ++ struct mutex irctl_lock; ++ struct lirc_buffer *buf; ++ unsigned int chunk_size; ++ ++ struct task_struct *task; ++ long jiffies_to_wait; ++ ++ struct cdev cdev; ++}; ++ ++static DEFINE_MUTEX(lirc_dev_lock); ++ ++static struct irctl *irctls[MAX_IRCTL_DEVICES]; ++ ++/* Only used for sysfs but defined to void otherwise */ ++static struct class *lirc_class; ++ ++/* helper function ++ * initializes the irctl structure ++ */ ++static void init_irctl(struct irctl *ir) ++{ ++ dev_dbg(ir->d.dev, LOGHEAD "initializing irctl\n", ++ ir->d.name, ir->d.minor); ++ mutex_init(&ir->irctl_lock); ++ ir->d.minor = NOPLUG; ++} ++ ++static void cleanup(struct irctl *ir) ++{ ++ dev_dbg(ir->d.dev, LOGHEAD "cleaning up\n", ir->d.name, ir->d.minor); ++ ++ device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor)); ++ ++ if (ir->buf != ir->d.rbuf) { ++ lirc_buffer_free(ir->buf); ++ kfree(ir->buf); ++ } ++ ir->buf = NULL; ++} ++ ++/* helper function ++ * reads key codes from driver and puts them into buffer ++ * returns 0 on success ++ */ ++static int add_to_buf(struct irctl *ir) ++{ ++ if (ir->d.add_to_buf) { ++ int res = -ENODATA; ++ int got_data = 0; ++ ++ /* ++ * service the device as long as it is returning ++ * data and we have space ++ */ ++get_data: ++ res = ir->d.add_to_buf(ir->d.data, 
ir->buf); ++ if (res == 0) { ++ got_data++; ++ goto get_data; ++ } ++ ++ if (res == -ENODEV) ++ kthread_stop(ir->task); ++ ++ return got_data ? 0 : res; ++ } ++ ++ return 0; ++} ++ ++/* main function of the polling thread ++ */ ++static int lirc_thread(void *irctl) ++{ ++ struct irctl *ir = irctl; ++ ++ dev_dbg(ir->d.dev, LOGHEAD "poll thread started\n", ++ ir->d.name, ir->d.minor); ++ ++ do { ++ if (ir->open) { ++ if (ir->jiffies_to_wait) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(ir->jiffies_to_wait); ++ } ++ if (kthread_should_stop()) ++ break; ++ if (!add_to_buf(ir)) ++ wake_up_interruptible(&ir->buf->wait_poll); ++ } else { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++ } ++ } while (!kthread_should_stop()); ++ ++ dev_dbg(ir->d.dev, LOGHEAD "poll thread ended\n", ++ ir->d.name, ir->d.minor); ++ ++ return 0; ++} ++ ++ ++static struct file_operations fops = { ++ .owner = THIS_MODULE, ++ .read = lirc_dev_fop_read, ++ .write = lirc_dev_fop_write, ++ .poll = lirc_dev_fop_poll, ++ .ioctl = lirc_dev_fop_ioctl, ++ .open = lirc_dev_fop_open, ++ .release = lirc_dev_fop_close, ++}; ++ ++static int lirc_cdev_add(struct irctl *ir) ++{ ++ int retval; ++ struct lirc_driver *d = &ir->d; ++ ++ if (d->fops) { ++ cdev_init(&ir->cdev, d->fops); ++ ir->cdev.owner = d->owner; ++ } else { ++ cdev_init(&ir->cdev, &fops); ++ ir->cdev.owner = THIS_MODULE; ++ } ++ kobject_set_name(&ir->cdev.kobj, "lirc%d", d->minor); ++ ++ retval = cdev_add(&ir->cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1); ++ if (retval) ++ kobject_put(&ir->cdev.kobj); ++ ++ return retval; ++} ++ ++int lirc_register_driver(struct lirc_driver *d) ++{ ++ struct irctl *ir; ++ int minor; ++ int bytes_in_key; ++ unsigned int chunk_size; ++ unsigned int buffer_size; ++ int err; ++ ++ if (!d) { ++ printk(KERN_ERR "lirc_dev: lirc_register_driver: " ++ "driver pointer must be not NULL!\n"); ++ err = -EBADRQC; ++ goto out; ++ } ++ ++ if (MAX_IRCTL_DEVICES <= d->minor) { ++ dev_err(d->dev, 
"lirc_dev: lirc_register_driver: " ++ "\"minor\" must be between 0 and %d (%d)!\n", ++ MAX_IRCTL_DEVICES-1, d->minor); ++ err = -EBADRQC; ++ goto out; ++ } ++ ++ if (1 > d->code_length || (BUFLEN * 8) < d->code_length) { ++ dev_err(d->dev, "lirc_dev: lirc_register_driver: " ++ "code length in bits for minor (%d) " ++ "must be less than %d!\n", ++ d->minor, BUFLEN * 8); ++ err = -EBADRQC; ++ goto out; ++ } ++ ++ dev_dbg(d->dev, "lirc_dev: lirc_register_driver: sample_rate: %d\n", ++ d->sample_rate); ++ if (d->sample_rate) { ++ if (2 > d->sample_rate || HZ < d->sample_rate) { ++ dev_err(d->dev, "lirc_dev: lirc_register_driver: " ++ "sample_rate must be between 2 and %d!\n", HZ); ++ err = -EBADRQC; ++ goto out; ++ } ++ if (!d->add_to_buf) { ++ dev_err(d->dev, "lirc_dev: lirc_register_driver: " ++ "add_to_buf cannot be NULL when " ++ "sample_rate is set\n"); ++ err = -EBADRQC; ++ goto out; ++ } ++ } else if (!(d->fops && d->fops->read) && !d->rbuf) { ++ dev_err(d->dev, "lirc_dev: lirc_register_driver: " ++ "fops->read and rbuf cannot all be NULL!\n"); ++ err = -EBADRQC; ++ goto out; ++ } else if (!d->rbuf) { ++ if (!(d->fops && d->fops->read && d->fops->poll && ++ d->fops->ioctl)) { ++ dev_err(d->dev, "lirc_dev: lirc_register_driver: " ++ "neither read, poll nor ioctl can be NULL!\n"); ++ err = -EBADRQC; ++ goto out; ++ } ++ } ++ ++ mutex_lock(&lirc_dev_lock); ++ ++ minor = d->minor; ++ ++ if (minor < 0) { ++ /* find first free slot for driver */ ++ for (minor = 0; minor < MAX_IRCTL_DEVICES; minor++) ++ if (!irctls[minor]) ++ break; ++ if (MAX_IRCTL_DEVICES == minor) { ++ dev_err(d->dev, "lirc_dev: lirc_register_driver: " ++ "no free slots for drivers!\n"); ++ err = -ENOMEM; ++ goto out_lock; ++ } ++ } else if (irctls[minor]) { ++ dev_err(d->dev, "lirc_dev: lirc_register_driver: " ++ "minor (%d) just registered!\n", minor); ++ err = -EBUSY; ++ goto out_lock; ++ } ++ ++ ir = kzalloc(sizeof(struct irctl), GFP_KERNEL); ++ if (!ir) { ++ err = -ENOMEM; ++ goto out_lock; ++ 
} ++ init_irctl(ir); ++ irctls[minor] = ir; ++ d->minor = minor; ++ ++ if (d->sample_rate) { ++ ir->jiffies_to_wait = HZ / d->sample_rate; ++ } else { ++ /* it means - wait for external event in task queue */ ++ ir->jiffies_to_wait = 0; ++ } ++ ++ /* some safety check 8-) */ ++ d->name[sizeof(d->name)-1] = '\0'; ++ ++ bytes_in_key = BITS_TO_LONGS(d->code_length) + ++ (d->code_length % 8 ? 1 : 0); ++ buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key; ++ chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key; ++ ++ if (d->rbuf) { ++ ir->buf = d->rbuf; ++ } else { ++ ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!ir->buf) { ++ err = -ENOMEM; ++ goto out_lock; ++ } ++ err = lirc_buffer_init(ir->buf, chunk_size, buffer_size); ++ if (err) { ++ kfree(ir->buf); ++ goto out_lock; ++ } ++ } ++ ir->chunk_size = ir->buf->chunk_size; ++ ++ if (d->features == 0) ++ d->features = LIRC_CAN_REC_LIRCCODE; ++ ++ ir->d = *d; ++ ir->d.minor = minor; ++ ++ device_create(lirc_class, ir->d.dev, ++ MKDEV(MAJOR(lirc_base_dev), ir->d.minor), NULL, ++ "lirc%u", ir->d.minor); ++ ++ if (d->sample_rate) { ++ /* try to fire up polling thread */ ++ ir->task = kthread_run(lirc_thread, (void *)ir, "lirc_dev"); ++ if (IS_ERR(ir->task)) { ++ dev_err(d->dev, "lirc_dev: lirc_register_driver: " ++ "cannot run poll thread for minor = %d\n", ++ d->minor); ++ err = -ECHILD; ++ goto out_sysfs; ++ } ++ } ++ ++ err = lirc_cdev_add(ir); ++ if (err) ++ goto out_sysfs; ++ ++ ir->attached = 1; ++ mutex_unlock(&lirc_dev_lock); ++ ++ dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n", ++ ir->d.name, ir->d.minor); ++ return minor; ++ ++out_sysfs: ++ device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor)); ++out_lock: ++ mutex_unlock(&lirc_dev_lock); ++out: ++ return err; ++} ++EXPORT_SYMBOL(lirc_register_driver); ++ ++int lirc_unregister_driver(int minor) ++{ ++ struct irctl *ir; ++ ++ if (minor < 0 || minor >= MAX_IRCTL_DEVICES) { ++ 
printk(KERN_ERR "lirc_dev: lirc_unregister_driver: " ++ "\"minor (%d)\" must be between 0 and %d!\n", ++ minor, MAX_IRCTL_DEVICES-1); ++ return -EBADRQC; ++ } ++ ++ ir = irctls[minor]; ++ ++ mutex_lock(&lirc_dev_lock); ++ ++ if (ir->d.minor != minor) { ++ printk(KERN_ERR "lirc_dev: lirc_unregister_driver: " ++ "minor (%d) device not registered!", minor); ++ mutex_unlock(&lirc_dev_lock); ++ return -ENOENT; ++ } ++ ++ /* end up polling thread */ ++ if (ir->task) ++ kthread_stop(ir->task); ++ ++ dev_dbg(ir->d.dev, "lirc_dev: driver %s unregistered from minor = %d\n", ++ ir->d.name, ir->d.minor); ++ ++ ir->attached = 0; ++ if (ir->open) { ++ dev_dbg(ir->d.dev, LOGHEAD "releasing opened driver\n", ++ ir->d.name, ir->d.minor); ++ wake_up_interruptible(&ir->buf->wait_poll); ++ mutex_lock(&ir->irctl_lock); ++ ir->d.set_use_dec(ir->d.data); ++ module_put(ir->d.owner); ++ mutex_unlock(&ir->irctl_lock); ++ cdev_del(&ir->cdev); ++ } else { ++ cleanup(ir); ++ cdev_del(&ir->cdev); ++ kfree(ir); ++ irctls[minor] = NULL; ++ } ++ ++ mutex_unlock(&lirc_dev_lock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(lirc_unregister_driver); ++ ++int lirc_dev_fop_open(struct inode *inode, struct file *file) ++{ ++ struct irctl *ir; ++ int retval = 0; ++ ++ if (iminor(inode) >= MAX_IRCTL_DEVICES) { ++ printk(KERN_WARNING "lirc_dev [%d]: open result = -ENODEV\n", ++ iminor(inode)); ++ return -ENODEV; ++ } ++ ++ if (mutex_lock_interruptible(&lirc_dev_lock)) ++ return -ERESTARTSYS; ++ ++ ir = irctls[iminor(inode)]; ++ if (!ir) { ++ retval = -ENODEV; ++ goto error; ++ } ++ ++ dev_dbg(ir->d.dev, LOGHEAD "open called\n", ir->d.name, ir->d.minor); ++ ++ if (ir->d.minor == NOPLUG) { ++ retval = -ENODEV; ++ goto error; ++ } ++ ++ if (ir->open) { ++ retval = -EBUSY; ++ goto error; ++ } ++ ++ if (try_module_get(ir->d.owner)) { ++ ++ir->open; ++ retval = ir->d.set_use_inc(ir->d.data); ++ ++ if (retval) { ++ module_put(ir->d.owner); ++ --ir->open; ++ } else { ++ lirc_buffer_clear(ir->buf); ++ } ++ if (ir->task) ++ 
wake_up_process(ir->task); ++ } ++ ++error: ++ if (ir) ++ dev_dbg(ir->d.dev, LOGHEAD "open result = %d\n", ++ ir->d.name, ir->d.minor, retval); ++ ++ mutex_unlock(&lirc_dev_lock); ++ ++ return retval; ++} ++EXPORT_SYMBOL(lirc_dev_fop_open); ++ ++int lirc_dev_fop_close(struct inode *inode, struct file *file) ++{ ++ struct irctl *ir = irctls[iminor(inode)]; ++ ++ dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor); ++ ++ WARN_ON(mutex_lock_killable(&lirc_dev_lock)); ++ ++ --ir->open; ++ if (ir->attached) { ++ ir->d.set_use_dec(ir->d.data); ++ module_put(ir->d.owner); ++ } else { ++ cleanup(ir); ++ irctls[ir->d.minor] = NULL; ++ kfree(ir); ++ } ++ ++ mutex_unlock(&lirc_dev_lock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(lirc_dev_fop_close); ++ ++unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait) ++{ ++ struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; ++ unsigned int ret; ++ ++ dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor); ++ ++ if (!ir->attached) { ++ mutex_unlock(&ir->irctl_lock); ++ return POLLERR; ++ } ++ ++ poll_wait(file, &ir->buf->wait_poll, wait); ++ ++ if (ir->buf) ++ if (lirc_buffer_empty(ir->buf)) ++ ret = 0; ++ else ++ ret = POLLIN | POLLRDNORM; ++ else ++ ret = POLLERR; ++ ++ dev_dbg(ir->d.dev, LOGHEAD "poll result = %d\n", ++ ir->d.name, ir->d.minor, ret); ++ ++ return ret; ++} ++EXPORT_SYMBOL(lirc_dev_fop_poll); ++ ++int lirc_dev_fop_ioctl(struct inode *inode, struct file *file, ++ unsigned int cmd, unsigned long arg) ++{ ++ unsigned long mode; ++ int result = 0; ++ struct irctl *ir = irctls[iminor(inode)]; ++ ++ dev_dbg(ir->d.dev, LOGHEAD "ioctl called (0x%x)\n", ++ ir->d.name, ir->d.minor, cmd); ++ ++ if (ir->d.minor == NOPLUG || !ir->attached) { ++ dev_dbg(ir->d.dev, LOGHEAD "ioctl result = -ENODEV\n", ++ ir->d.name, ir->d.minor); ++ return -ENODEV; ++ } ++ ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ result = put_user(ir->d.features, (unsigned long *)arg); ++ break; ++ case 
LIRC_GET_REC_MODE: ++ if (!(ir->d.features & LIRC_CAN_REC_MASK)) ++ return -ENOSYS; ++ ++ result = put_user(LIRC_REC2MODE ++ (ir->d.features & LIRC_CAN_REC_MASK), ++ (unsigned long *)arg); ++ break; ++ case LIRC_SET_REC_MODE: ++ if (!(ir->d.features & LIRC_CAN_REC_MASK)) ++ return -ENOSYS; ++ ++ result = get_user(mode, (unsigned long *)arg); ++ if (!result && !(LIRC_MODE2REC(mode) & ir->d.features)) ++ result = -EINVAL; ++ /* ++ * FIXME: We should actually set the mode somehow but ++ * for now, lirc_serial doesn't support mode changing either ++ */ ++ break; ++ case LIRC_GET_LENGTH: ++ result = put_user(ir->d.code_length, (unsigned long *)arg); ++ break; ++ default: ++ result = -EINVAL; ++ } ++ ++ dev_dbg(ir->d.dev, LOGHEAD "ioctl result = %d\n", ++ ir->d.name, ir->d.minor, result); ++ ++ return result; ++} ++EXPORT_SYMBOL(lirc_dev_fop_ioctl); ++ ++ssize_t lirc_dev_fop_read(struct file *file, ++ char *buffer, ++ size_t length, ++ loff_t *ppos) ++{ ++ struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; ++ unsigned char buf[ir->chunk_size]; ++ int ret = 0, written = 0; ++ DECLARE_WAITQUEUE(wait, current); ++ ++ dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor); ++ ++ if (mutex_lock_interruptible(&ir->irctl_lock)) ++ return -ERESTARTSYS; ++ if (!ir->attached) { ++ mutex_unlock(&ir->irctl_lock); ++ return -ENODEV; ++ } ++ ++ if (length % ir->chunk_size) { ++ dev_dbg(ir->d.dev, LOGHEAD "read result = -EINVAL\n", ++ ir->d.name, ir->d.minor); ++ mutex_unlock(&ir->irctl_lock); ++ return -EINVAL; ++ } ++ ++ /* ++ * we add ourselves to the task queue before buffer check ++ * to avoid losing scan code (in case when queue is awaken somewhere ++ * between while condition checking and scheduling) ++ */ ++ add_wait_queue(&ir->buf->wait_poll, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ /* ++ * while we didn't provide 'length' bytes, device is opened in blocking ++ * mode and 'copy_to_user' is happy, wait for data. 
++ */ ++ while (written < length && ret == 0) { ++ if (lirc_buffer_empty(ir->buf)) { ++ /* According to the read(2) man page, 'written' can be ++ * returned as less than 'length', instead of blocking ++ * again, returning -EWOULDBLOCK, or returning ++ * -ERESTARTSYS */ ++ if (written) ++ break; ++ if (file->f_flags & O_NONBLOCK) { ++ ret = -EWOULDBLOCK; ++ break; ++ } ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ ++ mutex_unlock(&ir->irctl_lock); ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ if (mutex_lock_interruptible(&ir->irctl_lock)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ ++ if (!ir->attached) { ++ ret = -ENODEV; ++ break; ++ } ++ } else { ++ lirc_buffer_read(ir->buf, buf); ++ ret = copy_to_user((void *)buffer+written, buf, ++ ir->buf->chunk_size); ++ written += ir->buf->chunk_size; ++ } ++ } ++ ++ remove_wait_queue(&ir->buf->wait_poll, &wait); ++ set_current_state(TASK_RUNNING); ++ mutex_unlock(&ir->irctl_lock); ++ ++ dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n", ++ ir->d.name, ir->d.minor, ret ? "-EFAULT" : "OK", ret); ++ ++ return ret ? 
ret : written; ++} ++EXPORT_SYMBOL(lirc_dev_fop_read); ++ ++void *lirc_get_pdata(struct file *file) ++{ ++ void *data = NULL; ++ ++ if (file && file->f_dentry && file->f_dentry->d_inode && ++ file->f_dentry->d_inode->i_rdev) { ++ struct irctl *ir; ++ ir = irctls[iminor(file->f_dentry->d_inode)]; ++ data = ir->d.data; ++ } ++ ++ return data; ++} ++EXPORT_SYMBOL(lirc_get_pdata); ++ ++ ++ssize_t lirc_dev_fop_write(struct file *file, const char *buffer, ++ size_t length, loff_t *ppos) ++{ ++ struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; ++ ++ dev_dbg(ir->d.dev, LOGHEAD "write called\n", ir->d.name, ir->d.minor); ++ ++ if (!ir->attached) ++ return -ENODEV; ++ ++ return -EINVAL; ++} ++EXPORT_SYMBOL(lirc_dev_fop_write); ++ ++ ++static int __init lirc_dev_init(void) ++{ ++ int retval; ++ ++ lirc_class = class_create(THIS_MODULE, "lirc"); ++ if (IS_ERR(lirc_class)) { ++ retval = PTR_ERR(lirc_class); ++ printk(KERN_ERR "lirc_dev: class_create failed\n"); ++ goto error; ++ } ++ ++ retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES, ++ IRCTL_DEV_NAME); ++ if (retval) { ++ class_destroy(lirc_class); ++ printk(KERN_ERR "lirc_dev: alloc_chrdev_region failed\n"); ++ goto error; ++ } ++ ++ ++ printk(KERN_INFO "lirc_dev: IR Remote Control driver registered, " ++ "major %d \n", MAJOR(lirc_base_dev)); ++ ++error: ++ return retval; ++} ++ ++ ++ ++static void __exit lirc_dev_exit(void) ++{ ++ class_destroy(lirc_class); ++ unregister_chrdev_region(lirc_base_dev, MAX_IRCTL_DEVICES); ++ printk(KERN_INFO "lirc_dev: module unloaded\n"); ++} ++ ++module_init(lirc_dev_init); ++module_exit(lirc_dev_exit); ++ ++MODULE_DESCRIPTION("LIRC base driver module"); ++MODULE_AUTHOR("Artur Lipowski"); ++MODULE_LICENSE("GPL"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); +diff --git a/drivers/input/lirc/lirc_dev.h b/drivers/input/lirc/lirc_dev.h +new file mode 100644 +index 0000000..99d0442 +--- /dev/null ++++ 
b/drivers/input/lirc/lirc_dev.h +@@ -0,0 +1,225 @@ ++/* ++ * LIRC base driver ++ * ++ * by Artur Lipowski ++ * This code is licensed under GNU GPL ++ * ++ */ ++ ++#ifndef _LINUX_LIRC_DEV_H ++#define _LINUX_LIRC_DEV_H ++ ++#define MAX_IRCTL_DEVICES 4 ++#define BUFLEN 16 ++ ++#define mod(n, div) ((n) % (div)) ++ ++#include ++#include ++#include ++#include ++#include ++ ++struct lirc_buffer { ++ wait_queue_head_t wait_poll; ++ spinlock_t fifo_lock; ++ unsigned int chunk_size; ++ unsigned int size; /* in chunks */ ++ /* Using chunks instead of bytes pretends to simplify boundary checking ++ * And should allow for some performance fine tunning later */ ++ struct kfifo fifo; ++ u8 fifo_initialized; ++}; ++ ++static inline void lirc_buffer_clear(struct lirc_buffer *buf) ++{ ++ unsigned long flags; ++ ++ if (buf->fifo_initialized) { ++ spin_lock_irqsave(&buf->fifo_lock, flags); ++ kfifo_reset(&buf->fifo); ++ spin_unlock_irqrestore(&buf->fifo_lock, flags); ++ } else ++ WARN(1, "calling %s on an uninitialized lirc_buffer\n", ++ __func__); ++} ++ ++static inline int lirc_buffer_init(struct lirc_buffer *buf, ++ unsigned int chunk_size, ++ unsigned int size) ++{ ++ int ret; ++ ++ init_waitqueue_head(&buf->wait_poll); ++ spin_lock_init(&buf->fifo_lock); ++ buf->chunk_size = chunk_size; ++ buf->size = size; ++ ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL); ++ if (ret == 0) ++ buf->fifo_initialized = 1; ++ ++ return ret; ++} ++ ++static inline void lirc_buffer_free(struct lirc_buffer *buf) ++{ ++ if (buf->fifo_initialized) { ++ kfifo_free(&buf->fifo); ++ buf->fifo_initialized = 0; ++ } else ++ WARN(1, "calling %s on an uninitialized lirc_buffer\n", ++ __func__); ++} ++ ++static inline int lirc_buffer_len(struct lirc_buffer *buf) ++{ ++ int len; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&buf->fifo_lock, flags); ++ len = kfifo_len(&buf->fifo); ++ spin_unlock_irqrestore(&buf->fifo_lock, flags); ++ ++ return len; ++} ++ ++static inline int lirc_buffer_full(struct 
lirc_buffer *buf) ++{ ++ return lirc_buffer_len(buf) == buf->size * buf->chunk_size; ++} ++ ++static inline int lirc_buffer_empty(struct lirc_buffer *buf) ++{ ++ return !lirc_buffer_len(buf); ++} ++ ++static inline int lirc_buffer_available(struct lirc_buffer *buf) ++{ ++ return buf->size - (lirc_buffer_len(buf) / buf->chunk_size); ++} ++ ++static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf, ++ unsigned char *dest) ++{ ++ unsigned int ret = 0; ++ ++ if (lirc_buffer_len(buf) >= buf->chunk_size) ++ ret = kfifo_out_locked(&buf->fifo, dest, buf->chunk_size, ++ &buf->fifo_lock); ++ return ret; ++ ++} ++ ++static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf, ++ unsigned char *orig) ++{ ++ unsigned int ret; ++ ++ ret = kfifo_in_locked(&buf->fifo, orig, buf->chunk_size, ++ &buf->fifo_lock); ++ ++ return ret; ++} ++ ++struct lirc_driver { ++ char name[40]; ++ int minor; ++ unsigned long code_length; ++ unsigned int buffer_size; /* in chunks holding one code each */ ++ int sample_rate; ++ unsigned long features; ++ ++ unsigned int chunk_size; ++ ++ void *data; ++ int (*add_to_buf) (void *data, struct lirc_buffer *buf); ++ struct lirc_buffer *rbuf; ++ int (*set_use_inc) (void *data); ++ void (*set_use_dec) (void *data); ++ struct file_operations *fops; ++ struct device *dev; ++ struct module *owner; ++}; ++ ++/* name: ++ * this string will be used for logs ++ * ++ * minor: ++ * indicates minor device (/dev/lirc) number for registered driver ++ * if caller fills it with negative value, then the first free minor ++ * number will be used (if available) ++ * ++ * code_length: ++ * length of the remote control key code expressed in bits ++ * ++ * sample_rate: ++ * ++ * data: ++ * it may point to any driver data and this pointer will be passed to ++ * all callback functions ++ * ++ * add_to_buf: ++ * add_to_buf will be called after specified period of the time or ++ * triggered by the external event, this behavior depends on value of ++ * the 
sample_rate this function will be called in user context. This ++ * routine should return 0 if data was added to the buffer and ++ * -ENODATA if none was available. This should add some number of bits ++ * evenly divisible by code_length to the buffer ++ * ++ * rbuf: ++ * if not NULL, it will be used as a read buffer, you will have to ++ * write to the buffer by other means, like irq's (see also ++ * lirc_serial.c). ++ * ++ * set_use_inc: ++ * set_use_inc will be called after device is opened ++ * ++ * set_use_dec: ++ * set_use_dec will be called after device is closed ++ * ++ * fops: ++ * file_operations for drivers which don't fit the current driver model. ++ * ++ * Some ioctl's can be directly handled by lirc_dev if the driver's ++ * ioctl function is NULL or if it returns -ENOIOCTLCMD (see also ++ * lirc_serial.c). ++ * ++ * owner: ++ * the module owning this struct ++ * ++ */ ++ ++ ++/* following functions can be called ONLY from user context ++ * ++ * returns negative value on error or minor number ++ * of the registered device if success ++ * contents of the structure pointed by p is copied ++ */ ++extern int lirc_register_driver(struct lirc_driver *d); ++ ++/* returns negative value on error or 0 if success ++*/ ++extern int lirc_unregister_driver(int minor); ++ ++/* Returns the private data stored in the lirc_driver ++ * associated with the given device file pointer. 
++ */ ++void *lirc_get_pdata(struct file *file); ++ ++/* default file operations ++ * used by drivers if they override only some operations ++ */ ++int lirc_dev_fop_open(struct inode *inode, struct file *file); ++int lirc_dev_fop_close(struct inode *inode, struct file *file); ++unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait); ++int lirc_dev_fop_ioctl(struct inode *inode, struct file *file, ++ unsigned int cmd, unsigned long arg); ++ssize_t lirc_dev_fop_read(struct file *file, char *buffer, size_t length, ++ loff_t *ppos); ++ssize_t lirc_dev_fop_write(struct file *file, const char *buffer, size_t length, ++ loff_t *ppos); ++long lirc_dev_fop_compat_ioctl(struct file *file, unsigned int cmd32, ++ unsigned long arg); ++ ++#endif +diff --git a/drivers/input/lirc/lirc_ene0100.c b/drivers/input/lirc/lirc_ene0100.c +new file mode 100644 +index 0000000..a152c52 +--- /dev/null ++++ b/drivers/input/lirc/lirc_ene0100.c +@@ -0,0 +1,646 @@ ++/* ++ * driver for ENE KB3926 B/C/D CIR (also known as ENE0100) ++ * ++ * Copyright (C) 2009 Maxim Levitsky ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 ++ * USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "lirc_ene0100.h" ++ ++static int sample_period = 75; ++static int enable_idle = 1; ++static int enable_learning; ++ ++static void ene_set_idle(struct ene_device *dev, int idle); ++static void ene_set_inputs(struct ene_device *dev, int enable); ++ ++/* read a hardware register */ ++static u8 ene_hw_read_reg(struct ene_device *dev, u16 reg) ++{ ++ outb(reg >> 8, dev->hw_io + ENE_ADDR_HI); ++ outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO); ++ return inb(dev->hw_io + ENE_IO); ++} ++ ++/* write a hardware register */ ++static void ene_hw_write_reg(struct ene_device *dev, u16 reg, u8 value) ++{ ++ outb(reg >> 8, dev->hw_io + ENE_ADDR_HI); ++ outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO); ++ outb(value, dev->hw_io + ENE_IO); ++} ++ ++/* change specific bits in hardware register */ ++static void ene_hw_write_reg_mask(struct ene_device *dev, ++ u16 reg, u8 value, u8 mask) ++{ ++ u8 regvalue; ++ ++ outb(reg >> 8, dev->hw_io + ENE_ADDR_HI); ++ outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO); ++ ++ regvalue = inb(dev->hw_io + ENE_IO) & ~mask; ++ regvalue |= (value & mask); ++ outb(regvalue, dev->hw_io + ENE_IO); ++} ++ ++/* read irq status and ack it */ ++static int ene_hw_irq_status(struct ene_device *dev, int *buffer_pointer) ++{ ++ u8 irq_status; ++ u8 fw_flags1, fw_flags2; ++ ++ fw_flags2 = ene_hw_read_reg(dev, ENE_FW2); ++ ++ if (buffer_pointer) ++ *buffer_pointer = 4 * (fw_flags2 & ENE_FW2_BUF_HIGH); ++ ++ if (dev->hw_revision < ENE_HW_C) { ++ irq_status = ene_hw_read_reg(dev, ENEB_IRQ_STATUS); ++ ++ if (!(irq_status & ENEB_IRQ_STATUS_IR)) ++ return 0; ++ ene_hw_write_reg(dev, ENEB_IRQ_STATUS, ++ irq_status & ~ENEB_IRQ_STATUS_IR); ++ ++ /* rev B support only recieving */ ++ 
return ENE_IRQ_RX; ++ } ++ ++ irq_status = ene_hw_read_reg(dev, ENEC_IRQ); ++ ++ if (!(irq_status & ENEC_IRQ_STATUS)) ++ return 0; ++ ++ /* original driver does that twice - a workaround ? */ ++ ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS); ++ ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS); ++ ++ /* clear unknown flag in F8F9 */ ++ if (fw_flags2 & ENE_FW2_IRQ_CLR) ++ ene_hw_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_IRQ_CLR); ++ ++ /* check if this is a TX interrupt */ ++ fw_flags1 = ene_hw_read_reg(dev, ENE_FW1); ++ ++ if (fw_flags1 & ENE_FW1_TXIRQ) { ++ ene_hw_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ); ++ return ENE_IRQ_TX; ++ } else ++ return ENE_IRQ_RX; ++} ++ ++static int ene_hw_detect(struct ene_device *dev) ++{ ++ u8 chip_major, chip_minor; ++ u8 hw_revision, old_ver; ++ u8 tmp; ++ u8 fw_capabilities; ++ ++ tmp = ene_hw_read_reg(dev, ENE_HW_UNK); ++ ene_hw_write_reg(dev, ENE_HW_UNK, tmp & ~ENE_HW_UNK_CLR); ++ ++ chip_major = ene_hw_read_reg(dev, ENE_HW_VER_MAJOR); ++ chip_minor = ene_hw_read_reg(dev, ENE_HW_VER_MINOR); ++ ++ ene_hw_write_reg(dev, ENE_HW_UNK, tmp); ++ hw_revision = ene_hw_read_reg(dev, ENE_HW_VERSION); ++ old_ver = ene_hw_read_reg(dev, ENE_HW_VER_OLD); ++ ++ if (hw_revision == 0xFF) { ++ ++ ene_printk(KERN_WARNING, "device seems to be disabled\n"); ++ ene_printk(KERN_WARNING, ++ "send a mail to lirc-list@lists.sourceforge.net\n"); ++ ene_printk(KERN_WARNING, "please attach output of acpidump\n"); ++ ++ return -ENODEV; ++ } ++ ++ if (chip_major == 0x33) { ++ ene_printk(KERN_WARNING, "chips 0x33xx aren't supported yet\n"); ++ return -ENODEV; ++ } ++ ++ if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) { ++ dev->hw_revision = ENE_HW_C; ++ ene_printk(KERN_WARNING, ++ "KB3926C detected, driver support is not complete!\n"); ++ ++ } else if (old_ver == 0x24 && hw_revision == 0xC0) { ++ dev->hw_revision = ENE_HW_B; ++ ene_printk(KERN_NOTICE, "KB3926B detected\n"); ++ } else { ++ 
dev->hw_revision = ENE_HW_D; ++ ene_printk(KERN_WARNING, ++ "unknown ENE chip detected, assuming KB3926D\n"); ++ ene_printk(KERN_WARNING, "driver support incomplete"); ++ ++ } ++ ++ ene_printk(KERN_DEBUG, "chip is 0x%02x%02x - 0x%02x, 0x%02x\n", ++ chip_major, chip_minor, old_ver, hw_revision); ++ ++ ++ /* detect features hardware supports */ ++ ++ if (dev->hw_revision < ENE_HW_C) ++ return 0; ++ ++ fw_capabilities = ene_hw_read_reg(dev, ENE_FW2); ++ ++ dev->hw_gpio40_learning = fw_capabilities & ENE_FW2_GP40_AS_LEARN; ++ dev->hw_learning_and_tx_capable = fw_capabilities & ENE_FW2_LEARNING; ++ ++ dev->hw_fan_as_normal_input = dev->hw_learning_and_tx_capable && ++ fw_capabilities & ENE_FW2_FAN_AS_NRML_IN; ++ ++ ene_printk(KERN_NOTICE, "hardware features:\n"); ++ ene_printk(KERN_NOTICE, ++ "learning and tx %s, gpio40_learn %s, fan_in %s\n", ++ dev->hw_learning_and_tx_capable ? "on" : "off", ++ dev->hw_gpio40_learning ? "on" : "off", ++ dev->hw_fan_as_normal_input ? "on" : "off"); ++ ++ if (!dev->hw_learning_and_tx_capable && enable_learning) ++ enable_learning = 0; ++ ++ if (dev->hw_learning_and_tx_capable) { ++ ene_printk(KERN_WARNING, ++ "Device supports transmitting, but the driver doesn't\n"); ++ ene_printk(KERN_WARNING, ++ "due to lack of hardware to test against.\n"); ++ ene_printk(KERN_WARNING, ++ "Send a mail to: lirc-list@lists.sourceforge.net\n"); ++ } ++ return 0; ++} ++ ++/* hardware initialization */ ++static int ene_hw_init(void *data) ++{ ++ u8 reg_value; ++ struct ene_device *dev = (struct ene_device *)data; ++ dev->in_use = 1; ++ ++ if (dev->hw_revision < ENE_HW_C) { ++ ene_hw_write_reg(dev, ENEB_IRQ, dev->irq << 1); ++ ene_hw_write_reg(dev, ENEB_IRQ_UNK1, 0x01); ++ } else { ++ reg_value = ene_hw_read_reg(dev, ENEC_IRQ) & 0xF0; ++ reg_value |= ENEC_IRQ_UNK_EN; ++ reg_value &= ~ENEC_IRQ_STATUS; ++ reg_value |= (dev->irq & ENEC_IRQ_MASK); ++ ene_hw_write_reg(dev, ENEC_IRQ, reg_value); ++ ene_hw_write_reg(dev, ENE_TX_UNK1, 0x63); ++ } ++ ++ 
ene_hw_write_reg(dev, ENE_CIR_CONF2, 0x00); ++ ene_set_inputs(dev, enable_learning); ++ ++ /* set sampling period */ ++ ene_hw_write_reg(dev, ENE_CIR_SAMPLE_PERIOD, sample_period); ++ ++ /* ack any pending irqs - just in case */ ++ ene_hw_irq_status(dev, NULL); ++ ++ /* enter idle mode */ ++ ene_set_idle(dev, 1); ++ ++ /* enable firmware bits */ ++ ene_hw_write_reg_mask(dev, ENE_FW1, ++ ENE_FW1_ENABLE | ENE_FW1_IRQ, ++ ENE_FW1_ENABLE | ENE_FW1_IRQ); ++ /* clear stats */ ++ dev->sample = 0; ++ return 0; ++} ++ ++/* this enables gpio40 signal, used if connected to wide band input*/ ++static void ene_enable_gpio40(struct ene_device *dev, int enable) ++{ ++ ene_hw_write_reg_mask(dev, ENE_CIR_CONF1, enable ? ++ 0 : ENE_CIR_CONF2_GPIO40DIS, ++ ENE_CIR_CONF2_GPIO40DIS); ++} ++ ++/* this enables the classic sampler */ ++static void ene_enable_normal_recieve(struct ene_device *dev, int enable) ++{ ++ ene_hw_write_reg(dev, ENE_CIR_CONF1, enable ? ENE_CIR_CONF1_ADC_ON : 0); ++} ++ ++/* this enables recieve via fan input */ ++static void ene_enable_fan_recieve(struct ene_device *dev, int enable) ++{ ++ if (!enable) ++ ene_hw_write_reg(dev, ENE_FAN_AS_IN1, 0); ++ else { ++ ene_hw_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN); ++ ene_hw_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN); ++ } ++ dev->fan_input_inuse = enable; ++} ++ ++/* determine which input to use*/ ++static void ene_set_inputs(struct ene_device *dev, int learning_enable) ++{ ++ ene_enable_normal_recieve(dev, 1); ++ ++ /* old hardware doesn't support learning mode for sure */ ++ if (dev->hw_revision <= ENE_HW_B) ++ return; ++ ++ /* reciever not learning capable, still set gpio40 correctly */ ++ if (!dev->hw_learning_and_tx_capable) { ++ ene_enable_gpio40(dev, !dev->hw_gpio40_learning); ++ return; ++ } ++ ++ /* enable learning mode */ ++ if (learning_enable) { ++ ene_enable_gpio40(dev, dev->hw_gpio40_learning); ++ ++ /* fan input is not used for learning */ ++ if (dev->hw_fan_as_normal_input) ++ 
ene_enable_fan_recieve(dev, 0); ++ ++ /* disable learning mode */ ++ } else { ++ if (dev->hw_fan_as_normal_input) { ++ ene_enable_fan_recieve(dev, 1); ++ ene_enable_normal_recieve(dev, 0); ++ } else ++ ene_enable_gpio40(dev, !dev->hw_gpio40_learning); ++ } ++ ++ /* set few additional settings for this mode */ ++ ene_hw_write_reg_mask(dev, ENE_CIR_CONF1, learning_enable ? ++ ENE_CIR_CONF1_LEARN1 : 0, ENE_CIR_CONF1_LEARN1); ++ ++ ene_hw_write_reg_mask(dev, ENE_CIR_CONF2, learning_enable ? ++ ENE_CIR_CONF2_LEARN2 : 0, ENE_CIR_CONF2_LEARN2); ++} ++ ++/* deinitialization */ ++static void ene_hw_deinit(void *data) ++{ ++ struct ene_device *dev = (struct ene_device *)data; ++ ++ /* disable samplers */ ++ ene_enable_normal_recieve(dev, 0); ++ ++ if (dev->hw_fan_as_normal_input) ++ ene_enable_fan_recieve(dev, 0); ++ ++ /* disable hardware IRQ and firmware flag */ ++ ene_hw_write_reg_mask(dev, ENE_FW1, 0, ENE_FW1_ENABLE | ENE_FW1_IRQ); ++ ++ ene_set_idle(dev, 1); ++ dev->in_use = 0; ++} ++ ++/* sends current sample to userspace */ ++static void send_sample(struct ene_device *dev) ++{ ++ int value = abs(dev->sample) & PULSE_MASK; ++ ++ if (dev->sample > 0) ++ value |= PULSE_BIT; ++ ++ if (!lirc_buffer_full(dev->lirc_driver->rbuf)) { ++ lirc_buffer_write(dev->lirc_driver->rbuf, (void *)&value); ++ wake_up(&dev->lirc_driver->rbuf->wait_poll); ++ } ++ dev->sample = 0; ++} ++ ++/* this updates current sample */ ++static void update_sample(struct ene_device *dev, int sample) ++{ ++ if (!dev->sample) ++ dev->sample = sample; ++ else if (same_sign(dev->sample, sample)) ++ dev->sample += sample; ++ else { ++ send_sample(dev); ++ dev->sample = sample; ++ } ++} ++ ++/* enable or disable idle mode */ ++static void ene_set_idle(struct ene_device *dev, int idle) ++{ ++ struct timeval now; ++ int disable = idle && enable_idle && (dev->hw_revision < ENE_HW_C); ++ ++ ene_hw_write_reg_mask(dev, ENE_CIR_SAMPLE_PERIOD, ++ disable ? 
0 : ENE_CIR_SAMPLE_OVERFLOW, ++ ENE_CIR_SAMPLE_OVERFLOW); ++ dev->idle = idle; ++ ++ /* remember when we have entered the idle mode */ ++ if (idle) { ++ do_gettimeofday(&dev->gap_start); ++ return; ++ } ++ ++ /* send the gap between keypresses now */ ++ do_gettimeofday(&now); ++ ++ if (now.tv_sec - dev->gap_start.tv_sec > 16) ++ dev->sample = space(PULSE_MASK); ++ else ++ dev->sample = dev->sample + ++ space(1000000ull * (now.tv_sec - dev->gap_start.tv_sec)) ++ + space(now.tv_usec - dev->gap_start.tv_usec); ++ ++ if (abs(dev->sample) > PULSE_MASK) ++ dev->sample = space(PULSE_MASK); ++ send_sample(dev); ++} ++ ++/* interrupt handler */ ++static irqreturn_t ene_hw_irq(int irq, void *data) ++{ ++ u16 hw_value; ++ int i, hw_sample; ++ int space; ++ int buffer_pointer; ++ int irq_status; ++ ++ struct ene_device *dev = (struct ene_device *)data; ++ irq_status = ene_hw_irq_status(dev, &buffer_pointer); ++ ++ if (!irq_status) ++ return IRQ_NONE; ++ ++ /* TODO: only RX for now */ ++ if (irq_status == ENE_IRQ_TX) ++ return IRQ_HANDLED; ++ ++ for (i = 0; i < ENE_SAMPLES_SIZE; i++) { ++ ++ hw_value = ene_hw_read_reg(dev, ++ ENE_SAMPLE_BUFFER + buffer_pointer + i); ++ ++ if (dev->fan_input_inuse) { ++ /* read high part of the sample */ ++ hw_value |= ene_hw_read_reg(dev, ++ ENE_SAMPLE_BUFFER_FAN + buffer_pointer + i) << 8; ++ ++ /* test for _space_ bit */ ++ space = !(hw_value & ENE_FAN_SMPL_PULS_MSK); ++ ++ /* clear space bit, and other unused bits */ ++ hw_value &= ENE_FAN_VALUE_MASK; ++ hw_sample = hw_value * ENE_SAMPLE_PERIOD_FAN; ++ ++ } else { ++ space = hw_value & ENE_SAMPLE_SPC_MASK; ++ hw_value &= ENE_SAMPLE_VALUE_MASK; ++ hw_sample = hw_value * sample_period; ++ } ++ ++ /* no more data */ ++ if (!(hw_value)) ++ break; ++ ++ if (space) ++ hw_sample *= -1; ++ ++ /* overflow sample recieved, handle it */ ++ ++ if (!dev->fan_input_inuse && hw_value == ENE_SAMPLE_OVERFLOW) { ++ ++ if (dev->idle) ++ continue; ++ ++ if (dev->sample > 0 || abs(dev->sample) <= ENE_MAXGAP) ++ 
update_sample(dev, hw_sample); ++ else ++ ene_set_idle(dev, 1); ++ ++ continue; ++ } ++ ++ /* normal first sample recieved */ ++ if (!dev->fan_input_inuse && dev->idle) { ++ ene_set_idle(dev, 0); ++ ++ /* discard first recieved value, its random ++ since its the time signal was off before ++ first pulse if idle mode is enabled, HW ++ does that for us */ ++ ++ if (!enable_idle) ++ continue; ++ } ++ update_sample(dev, hw_sample); ++ send_sample(dev); ++ } ++ return IRQ_HANDLED; ++} ++ ++static int ene_probe(struct pnp_dev *pnp_dev, ++ const struct pnp_device_id *dev_id) ++{ ++ struct ene_device *dev; ++ struct lirc_driver *lirc_driver; ++ int error = -ENOMEM; ++ ++ dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL); ++ ++ if (!dev) ++ goto err1; ++ ++ dev->pnp_dev = pnp_dev; ++ pnp_set_drvdata(pnp_dev, dev); ++ ++ ++ /* prepare lirc interface */ ++ error = -ENOMEM; ++ lirc_driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); ++ ++ if (!lirc_driver) ++ goto err2; ++ ++ dev->lirc_driver = lirc_driver; ++ ++ strcpy(lirc_driver->name, ENE_DRIVER_NAME); ++ lirc_driver->minor = -1; ++ lirc_driver->code_length = sizeof(int) * 8; ++ lirc_driver->features = LIRC_CAN_REC_MODE2; ++ lirc_driver->data = dev; ++ lirc_driver->set_use_inc = ene_hw_init; ++ lirc_driver->set_use_dec = ene_hw_deinit; ++ lirc_driver->dev = &pnp_dev->dev; ++ lirc_driver->owner = THIS_MODULE; ++ ++ lirc_driver->rbuf = kzalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ ++ if (!lirc_driver->rbuf) ++ goto err3; ++ ++ if (lirc_buffer_init(lirc_driver->rbuf, sizeof(int), sizeof(int) * 256)) ++ goto err4; ++ ++ error = -ENODEV; ++ if (lirc_register_driver(lirc_driver)) ++ goto err5; ++ ++ /* validate resources */ ++ if (!pnp_port_valid(pnp_dev, 0) || ++ pnp_port_len(pnp_dev, 0) < ENE_MAX_IO) ++ goto err6; ++ ++ if (!pnp_irq_valid(pnp_dev, 0)) ++ goto err6; ++ ++ dev->hw_io = pnp_port_start(pnp_dev, 0); ++ dev->irq = pnp_irq(pnp_dev, 0); ++ ++ /* claim the resources */ ++ error = -EBUSY; ++ if 
(!request_region(dev->hw_io, ENE_MAX_IO, ENE_DRIVER_NAME)) ++ goto err6; ++ ++ if (request_irq(dev->irq, ene_hw_irq, ++ IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) ++ goto err7; ++ ++ /* detect hardware version and features */ ++ error = ene_hw_detect(dev); ++ if (error) ++ goto err8; ++ ++ ene_printk(KERN_NOTICE, "driver has been succesfully loaded\n"); ++ return 0; ++ ++err8: ++ free_irq(dev->irq, dev); ++err7: ++ release_region(dev->hw_io, ENE_MAX_IO); ++err6: ++ lirc_unregister_driver(lirc_driver->minor); ++err5: ++ lirc_buffer_free(lirc_driver->rbuf); ++err4: ++ kfree(lirc_driver->rbuf); ++err3: ++ kfree(lirc_driver); ++err2: ++ kfree(dev); ++err1: ++ return error; ++} ++ ++static void ene_remove(struct pnp_dev *pnp_dev) ++{ ++ struct ene_device *dev = pnp_get_drvdata(pnp_dev); ++ ene_hw_deinit(dev); ++ free_irq(dev->irq, dev); ++ release_region(dev->hw_io, ENE_MAX_IO); ++ lirc_unregister_driver(dev->lirc_driver->minor); ++ lirc_buffer_free(dev->lirc_driver->rbuf); ++ kfree(dev->lirc_driver); ++ kfree(dev); ++} ++ ++#ifdef CONFIG_PM ++ ++/* TODO: make 'wake on IR' configurable and add .shutdown */ ++/* currently impossible due to lack of kernel support */ ++ ++static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state) ++{ ++ struct ene_device *dev = pnp_get_drvdata(pnp_dev); ++ ene_hw_write_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, ENE_FW1_WAKE); ++ return 0; ++} ++ ++static int ene_resume(struct pnp_dev *pnp_dev) ++{ ++ struct ene_device *dev = pnp_get_drvdata(pnp_dev); ++ if (dev->in_use) ++ ene_hw_init(dev); ++ ++ ene_hw_write_reg_mask(dev, ENE_FW1, 0, ENE_FW1_WAKE); ++ return 0; ++} ++ ++#endif ++ ++static const struct pnp_device_id ene_ids[] = { ++ {.id = "ENE0100",}, ++ {}, ++}; ++ ++static struct pnp_driver ene_driver = { ++ .name = ENE_DRIVER_NAME, ++ .id_table = ene_ids, ++ .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, ++ ++ .probe = ene_probe, ++ .remove = __devexit_p(ene_remove), ++ ++#ifdef CONFIG_PM ++ .suspend = ene_suspend, ++ .resume = ene_resume, 
++#endif ++}; ++ ++static int __init ene_init(void) ++{ ++ if (sample_period < 5) { ++ ene_printk(KERN_ERR, "sample period must be at\n"); ++ ene_printk(KERN_ERR, "least 5 us, (at least 30 recommended)\n"); ++ return -EINVAL; ++ } ++ return pnp_register_driver(&ene_driver); ++} ++ ++static void ene_exit(void) ++{ ++ pnp_unregister_driver(&ene_driver); ++} ++ ++module_param(sample_period, int, S_IRUGO); ++MODULE_PARM_DESC(sample_period, "Hardware sample period (75 us default)"); ++ ++module_param(enable_idle, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(enable_idle, ++ "Enables turning off signal sampling after long inactivity time; " ++ "if disabled might help detecting input signal (default: enabled)"); ++ ++module_param(enable_learning, bool, S_IRUGO); ++MODULE_PARM_DESC(enable_learning, "Use wide band (learning) reciever"); ++ ++MODULE_DEVICE_TABLE(pnp, ene_ids); ++MODULE_DESCRIPTION ++ ("LIRC driver for KB3926B/KB3926C/KB3926D (aka ENE0100) CIR port"); ++MODULE_AUTHOR("Maxim Levitsky"); ++MODULE_LICENSE("GPL"); ++ ++module_init(ene_init); ++module_exit(ene_exit); +diff --git a/drivers/input/lirc/lirc_ene0100.h b/drivers/input/lirc/lirc_ene0100.h +new file mode 100644 +index 0000000..953e7e4 +--- /dev/null ++++ b/drivers/input/lirc/lirc_ene0100.h +@@ -0,0 +1,169 @@ ++/* ++ * driver for ENE KB3926 B/C/D CIR (also known as ENE0100) ++ * ++ * Copyright (C) 2009 Maxim Levitsky ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 ++ * USA ++ */ ++ ++#include ++#include "lirc_dev.h" ++ ++/* hardware address */ ++#define ENE_STATUS 0 /* hardware status - unused */ ++#define ENE_ADDR_HI 1 /* hi byte of register address */ ++#define ENE_ADDR_LO 2 /* low byte of register address */ ++#define ENE_IO 3 /* read/write window */ ++#define ENE_MAX_IO 4 ++ ++/* 8 bytes of samples, divided in 2 halfs*/ ++#define ENE_SAMPLE_BUFFER 0xF8F0 /* regular sample buffer */ ++#define ENE_SAMPLE_SPC_MASK (1 << 7) /* sample is space */ ++#define ENE_SAMPLE_VALUE_MASK 0x7F ++#define ENE_SAMPLE_OVERFLOW 0x7F ++#define ENE_SAMPLES_SIZE 4 ++ ++/* fan input sample buffer */ ++#define ENE_SAMPLE_BUFFER_FAN 0xF8FB /* this buffer holds high byte of */ ++ /* each sample of normal buffer */ ++ ++#define ENE_FAN_SMPL_PULS_MSK 0x8000 /* this bit of combined sample */ ++ /* if set, says that sample is pulse */ ++#define ENE_FAN_VALUE_MASK 0x0FFF /* mask for valid bits of the value */ ++ ++/* first firmware register */ ++#define ENE_FW1 0xF8F8 ++#define ENE_FW1_ENABLE (1 << 0) /* enable fw processing */ ++#define ENE_FW1_TXIRQ (1 << 1) /* TX interrupt pending */ ++#define ENE_FW1_WAKE (1 << 6) /* enable wake from S3 */ ++#define ENE_FW1_IRQ (1 << 7) /* enable interrupt */ ++ ++/* second firmware register */ ++#define ENE_FW2 0xF8F9 ++#define ENE_FW2_BUF_HIGH (1 << 0) /* which half of the buffer to read */ ++#define ENE_FW2_IRQ_CLR (1 << 2) /* clear this on IRQ */ ++#define ENE_FW2_GP40_AS_LEARN (1 << 4) /* normal input is used as */ ++ /* learning input */ ++#define ENE_FW2_FAN_AS_NRML_IN (1 << 6) /* fan is used as normal input */ ++#define ENE_FW2_LEARNING (1 << 7) /* hardware supports learning and TX */ ++ ++/* fan as input settings - only if learning capable */ ++#define ENE_FAN_AS_IN1 0xFE30 /* fan init reg 1 */ 
++#define ENE_FAN_AS_IN1_EN 0xCD ++#define ENE_FAN_AS_IN2 0xFE31 /* fan init reg 2 */ ++#define ENE_FAN_AS_IN2_EN 0x03 ++#define ENE_SAMPLE_PERIOD_FAN 61 /* fan input has fixed sample period */ ++ ++/* IRQ registers block (for revision B) */ ++#define ENEB_IRQ 0xFD09 /* IRQ number */ ++#define ENEB_IRQ_UNK1 0xFD17 /* unknown setting = 1 */ ++#define ENEB_IRQ_STATUS 0xFD80 /* irq status */ ++#define ENEB_IRQ_STATUS_IR (1 << 5) /* IR irq */ ++ ++/* IRQ registers block (for revision C,D) */ ++#define ENEC_IRQ 0xFE9B /* new irq settings register */ ++#define ENEC_IRQ_MASK 0x0F /* irq number mask */ ++#define ENEC_IRQ_UNK_EN (1 << 4) /* always enabled */ ++#define ENEC_IRQ_STATUS (1 << 5) /* irq status and ACK */ ++ ++/* CIR block settings */ ++#define ENE_CIR_CONF1 0xFEC0 ++#define ENE_CIR_CONF1_ADC_ON 0x7 /* reciever on gpio40 enabled */ ++#define ENE_CIR_CONF1_LEARN1 (1 << 3) /* enabled on learning mode */ ++#define ENE_CIR_CONF1_TX_ON 0x30 /* enabled on transmit */ ++#define ENE_CIR_CONF1_TX_CARR (1 << 7) /* send TX carrier or not */ ++ ++#define ENE_CIR_CONF2 0xFEC1 /* unknown setting = 0 */ ++#define ENE_CIR_CONF2_LEARN2 (1 << 4) /* set on enable learning */ ++#define ENE_CIR_CONF2_GPIO40DIS (1 << 5) /* disable normal input via gpio40 */ ++ ++#define ENE_CIR_SAMPLE_PERIOD 0xFEC8 /* sample period in us */ ++#define ENE_CIR_SAMPLE_OVERFLOW (1 << 7) /* interrupt on overflows if set */ ++ ++ ++/* transmitter - not implemented yet */ ++/* KB3926C and higher */ ++/* transmission is very similiar to recieving, a byte is written to */ ++/* ENE_TX_INPUT, in same manner as it is read from sample buffer */ ++/* sample period is fixed*/ ++ ++ ++/* transmitter ports */ ++#define ENE_TX_PORT1 0xFC01 /* this enables one or both */ ++#define ENE_TX_PORT1_EN (1 << 5) /* TX ports */ ++#define ENE_TX_PORT2 0xFC08 ++#define ENE_TX_PORT2_EN (1 << 1) ++ ++#define ENE_TX_INPUT 0xFEC9 /* next byte to transmit */ ++#define ENE_TX_SPC_MASK (1 << 7) /* Transmitted sample is space */ 
++#define ENE_TX_UNK1 0xFECB /* set to 0x63 */ ++#define ENE_TX_SMPL_PERIOD 50 /* transmit sample period */ ++ ++ ++#define ENE_TX_CARRIER 0xFECE /* TX carrier * 2 (khz) */ ++#define ENE_TX_CARRIER_UNKBIT 0x80 /* This bit set on transmit */ ++#define ENE_TX_CARRIER_LOW 0xFECF /* TX carrier / 2 */ ++ ++/* Hardware versions */ ++#define ENE_HW_VERSION 0xFF00 /* hardware revision */ ++#define ENE_HW_UNK 0xFF1D ++#define ENE_HW_UNK_CLR (1 << 2) ++#define ENE_HW_VER_MAJOR 0xFF1E /* chip version */ ++#define ENE_HW_VER_MINOR 0xFF1F ++#define ENE_HW_VER_OLD 0xFD00 ++ ++#define same_sign(a, b) ((((a) > 0) && (b) > 0) || ((a) < 0 && (b) < 0)) ++ ++#define ENE_DRIVER_NAME "enecir" ++#define ENE_MAXGAP 250000 /* this is amount of time we wait ++ before turning the sampler, chosen ++ arbitry */ ++ ++#define space(len) (-(len)) /* add a space */ ++ ++/* software defines */ ++#define ENE_IRQ_RX 1 ++#define ENE_IRQ_TX 2 ++ ++#define ENE_HW_B 1 /* 3926B */ ++#define ENE_HW_C 2 /* 3926C */ ++#define ENE_HW_D 3 /* 3926D */ ++ ++#define ene_printk(level, text, ...) 
\ ++ printk(level ENE_DRIVER_NAME ": " text, ## __VA_ARGS__) ++ ++struct ene_device { ++ struct pnp_dev *pnp_dev; ++ struct lirc_driver *lirc_driver; ++ ++ /* hw settings */ ++ unsigned long hw_io; ++ int irq; ++ ++ int hw_revision; /* hardware revision */ ++ int hw_learning_and_tx_capable; /* learning capable */ ++ int hw_gpio40_learning; /* gpio40 is learning */ ++ int hw_fan_as_normal_input; /* fan input is used as regular input */ ++ ++ /* device data */ ++ int idle; ++ int fan_input_inuse; ++ ++ int sample; ++ int in_use; ++ ++ struct timeval gap_start; ++}; +diff --git a/drivers/input/lirc/lirc_i2c.c b/drivers/input/lirc/lirc_i2c.c +new file mode 100644 +index 0000000..f3f8c2e +--- /dev/null ++++ b/drivers/input/lirc/lirc_i2c.c +@@ -0,0 +1,536 @@ ++/* ++ * lirc_i2c.c ++ * ++ * i2c IR driver for the onboard IR port on many TV tuner cards, including: ++ * -Flavors of the Hauppauge PVR-150/250/350 ++ * -Hauppauge HVR-1300 ++ * -PixelView (BT878P+W/FM) ++ * -KNC ONE TV Station/Anubis Typhoon TView Tuner ++ * -Asus TV-Box and Creative/VisionTek BreakOut-Box ++ * -Leadtek Winfast PVR2000 ++ * ++ * Copyright (c) 2000 Gerd Knorr ++ * modified for PixelView (BT878P+W/FM) by ++ * Michal Kochanowicz ++ * Christoph Bartelmus ++ * modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by ++ * Ulrich Mueller ++ * modified for Asus TV-Box and Creative/VisionTek BreakOut-Box by ++ * Stefan Jahn ++ * modified for inclusion into kernel sources by ++ * Jerome Brock ++ * modified for Leadtek Winfast PVR2000 by ++ * Thomas Reitmayr (treitmayr@yahoo.com) ++ * modified for Hauppauge HVR-1300 by ++ * Jan Frey (jfrey@gmx.de) ++ * ++ * parts are cut&pasted from the old lirc_haup.c driver ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "lirc_dev.h" ++ ++struct IR { ++ struct lirc_driver l; ++ struct i2c_client c; ++ int nextkey; ++ unsigned char b[3]; ++ unsigned char bits; ++ unsigned char flag; ++}; ++ ++#define DEVICE_NAME "lirc_i2c" ++ ++/* module parameters */ ++static int debug; /* debug output */ ++static int minor = -1; /* minor number */ ++ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG DEVICE_NAME ": " fmt, \ ++ ## args); \ ++ } while (0) ++ ++static int reverse(int data, int bits) ++{ ++ int i; ++ int c; ++ ++ for (c = 0, i = 0; i < bits; i++) ++ c |= ((data & (1<c, keybuf, 1); ++ /* poll IR chip */ ++ if (i2c_master_recv(&ir->c, keybuf, sizeof(keybuf)) != sizeof(keybuf)) { ++ dprintk("read error\n"); ++ return -EIO; ++ } ++ ++ dprintk("key (0x%02x%02x%02x%02x)\n", ++ keybuf[0], keybuf[1], keybuf[2], keybuf[3]); ++ ++ /* key pressed ? 
*/ ++ if (keybuf[2] == 0xff) ++ return -ENODATA; ++ ++ /* remove repeat bit */ ++ keybuf[2] &= 0x7f; ++ keybuf[3] |= 0x80; ++ ++ lirc_buffer_write(buf, keybuf); ++ return 0; ++} ++ ++static int add_to_buf_pcf8574(void *data, struct lirc_buffer *buf) ++{ ++ struct IR *ir = data; ++ int rc; ++ unsigned char all, mask; ++ unsigned char key; ++ ++ /* compute all valid bits (key code + pressed/release flag) */ ++ all = ir->bits | ir->flag; ++ ++ /* save IR writable mask bits */ ++ mask = i2c_smbus_read_byte(&ir->c) & ~all; ++ ++ /* send bit mask */ ++ rc = i2c_smbus_write_byte(&ir->c, (0xff & all) | mask); ++ ++ /* receive scan code */ ++ rc = i2c_smbus_read_byte(&ir->c); ++ ++ if (rc == -1) { ++ dprintk("%s read error\n", ir->c.name); ++ return -EIO; ++ } ++ ++ /* drop duplicate polls */ ++ if (ir->b[0] == (rc & all)) ++ return -ENODATA; ++ ++ ir->b[0] = rc & all; ++ ++ dprintk("%s key 0x%02X %s\n", ir->c.name, rc & ir->bits, ++ (rc & ir->flag) ? "released" : "pressed"); ++ ++ /* ignore released buttons */ ++ if (rc & ir->flag) ++ return -ENODATA; ++ ++ /* set valid key code */ ++ key = rc & ir->bits; ++ lirc_buffer_write(buf, &key); ++ return 0; ++} ++ ++/* common for Hauppauge IR receivers */ ++static int add_to_buf_haup_common(void *data, struct lirc_buffer *buf, ++ unsigned char *keybuf, int size, int offset) ++{ ++ struct IR *ir = data; ++ __u16 code; ++ unsigned char codes[2]; ++ int ret; ++ ++ /* poll IR chip */ ++ ret = i2c_master_recv(&ir->c, keybuf, size); ++ if (ret == size) { ++ ir->b[0] = keybuf[offset]; ++ ir->b[1] = keybuf[offset+1]; ++ ir->b[2] = keybuf[offset+2]; ++ if (ir->b[0] != 0x00 && ir->b[1] != 0x00) ++ dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]); ++ } else { ++ dprintk("read error (ret=%d)\n", ret); ++ /* keep last successful read buffer */ ++ } ++ ++ /* key pressed ? 
*/ ++ if ((ir->b[0] & 0x80) == 0) ++ return -ENODATA; ++ ++ /* look what we have */ ++ code = (((__u16)ir->b[0]&0x7f)<<6) | (ir->b[1]>>2); ++ ++ codes[0] = (code >> 8) & 0xff; ++ codes[1] = code & 0xff; ++ ++ /* return it */ ++ dprintk("sending code 0x%02x%02x to lirc\n", codes[0], codes[1]); ++ lirc_buffer_write(buf, codes); ++ return 0; ++} ++ ++/* specific for the Hauppauge PVR150 IR receiver */ ++static int add_to_buf_haup_pvr150(void *data, struct lirc_buffer *buf) ++{ ++ unsigned char keybuf[6]; ++ /* fetch 6 bytes, first relevant is at offset 3 */ ++ return add_to_buf_haup_common(data, buf, keybuf, 6, 3); ++} ++ ++/* used for all Hauppauge IR receivers but the PVR150 */ ++static int add_to_buf_haup(void *data, struct lirc_buffer *buf) ++{ ++ unsigned char keybuf[3]; ++ /* fetch 3 bytes, first relevant is at offset 0 */ ++ return add_to_buf_haup_common(data, buf, keybuf, 3, 0); ++} ++ ++ ++static int add_to_buf_pvr2000(void *data, struct lirc_buffer *buf) ++{ ++ struct IR *ir = data; ++ unsigned char key; ++ s32 flags; ++ s32 code; ++ ++ /* poll IR chip */ ++ flags = i2c_smbus_read_byte_data(&ir->c, 0x10); ++ if (-1 == flags) { ++ dprintk("read error\n"); ++ return -ENODATA; ++ } ++ /* key pressed ? 
*/ ++ if (0 == (flags & 0x80)) ++ return -ENODATA; ++ ++ /* read actual key code */ ++ code = i2c_smbus_read_byte_data(&ir->c, 0x00); ++ if (-1 == code) { ++ dprintk("read error\n"); ++ return -ENODATA; ++ } ++ ++ key = code & 0xFF; ++ ++ dprintk("IR Key/Flags: (0x%02x/0x%02x)\n", key, flags & 0xFF); ++ ++ /* return it */ ++ lirc_buffer_write(buf, &key); ++ return 0; ++} ++ ++static int add_to_buf_pixelview(void *data, struct lirc_buffer *buf) ++{ ++ struct IR *ir = data; ++ unsigned char key; ++ ++ /* poll IR chip */ ++ if (1 != i2c_master_recv(&ir->c, &key, 1)) { ++ dprintk("read error\n"); ++ return -1; ++ } ++ dprintk("key %02x\n", key); ++ ++ /* return it */ ++ lirc_buffer_write(buf, &key); ++ return 0; ++} ++ ++static int add_to_buf_pv951(void *data, struct lirc_buffer *buf) ++{ ++ struct IR *ir = data; ++ unsigned char key; ++ unsigned char codes[4]; ++ ++ /* poll IR chip */ ++ if (1 != i2c_master_recv(&ir->c, &key, 1)) { ++ dprintk("read error\n"); ++ return -ENODATA; ++ } ++ /* ignore 0xaa */ ++ if (key == 0xaa) ++ return -ENODATA; ++ dprintk("key %02x\n", key); ++ ++ codes[0] = 0x61; ++ codes[1] = 0xD6; ++ codes[2] = reverse(key, 8); ++ codes[3] = (~codes[2])&0xff; ++ ++ lirc_buffer_write(buf, codes); ++ return 0; ++} ++ ++static int add_to_buf_knc1(void *data, struct lirc_buffer *buf) ++{ ++ static unsigned char last_key = 0xFF; ++ struct IR *ir = data; ++ unsigned char key; ++ ++ /* poll IR chip */ ++ if (1 != i2c_master_recv(&ir->c, &key, 1)) { ++ dprintk("read error\n"); ++ return -ENODATA; ++ } ++ ++ /* ++ * it seems that 0xFE indicates that a button is still held ++ * down, while 0xFF indicates that no button is held ++ * down. 
0xFE sequences are sometimes interrupted by 0xFF ++ */ ++ ++ dprintk("key %02x\n", key); ++ ++ if (key == 0xFF) ++ return -ENODATA; ++ ++ if (key == 0xFE) ++ key = last_key; ++ ++ last_key = key; ++ lirc_buffer_write(buf, &key); ++ ++ return 0; ++} ++ ++static int set_use_inc(void *data) ++{ ++ struct IR *ir = data; ++ ++ dprintk("%s called\n", __func__); ++ ++ /* lock bttv in memory while /dev/lirc is in use */ ++ i2c_use_client(&ir->c); ++ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct IR *ir = data; ++ ++ dprintk("%s called\n", __func__); ++ ++ i2c_release_client(&ir->c); ++} ++ ++static struct lirc_driver lirc_template = { ++ .name = "lirc_i2c", ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id); ++static int ir_remove(struct i2c_client *client); ++static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg); ++ ++static const struct i2c_device_id ir_receiver_id[] = { ++ /* Generic entry for any IR receiver */ ++ { "ir_video", 0 }, ++ /* IR device specific entries could be added here */ ++ { } ++}; ++ ++static struct i2c_driver driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "i2c ir driver", ++ }, ++ .probe = ir_probe, ++ .remove = ir_remove, ++ .id_table = ir_receiver_id, ++ .command = ir_command, ++}; ++ ++static void pcf_probe(struct i2c_client *client, struct IR *ir) ++{ ++ int ret1, ret2, ret3, ret4; ++ ++ ret1 = i2c_smbus_write_byte(client, 0xff); ++ ret2 = i2c_smbus_read_byte(client); ++ ret3 = i2c_smbus_write_byte(client, 0x00); ++ ret4 = i2c_smbus_read_byte(client); ++ ++ /* in the Asus TV-Box: bit 1-0 */ ++ if (((ret2 & 0x03) == 0x03) && ((ret4 & 0x03) == 0x00)) { ++ ir->bits = (unsigned char) ~0x07; ++ ir->flag = 0x04; ++ /* in the Creative/VisionTek BreakOut-Box: bit 7-6 */ ++ } else if (((ret2 & 0xc0) == 0xc0) && ((ret4 & 0xc0) == 0x00)) { ++ ir->bits = 
(unsigned char) ~0xe0; ++ ir->flag = 0x20; ++ } ++ ++ return; ++} ++ ++static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) ++{ ++ struct IR *ir; ++ struct i2c_adapter *adap = client->adapter; ++ unsigned short addr = client->addr; ++ int retval; ++ ++ ir = kzalloc(sizeof(struct IR), GFP_KERNEL); ++ if (!ir) ++ return -ENOMEM; ++ memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver)); ++ memcpy(&ir->c, client, sizeof(struct i2c_client)); ++ ++ i2c_set_clientdata(client, ir); ++ ir->l.data = ir; ++ ir->l.minor = minor; ++ ir->l.sample_rate = 10; ++ ir->l.dev = &ir->c.dev; ++ ir->nextkey = -1; ++ ++ switch (addr) { ++ case 0x64: ++ strlcpy(ir->c.name, "Pixelview IR", I2C_NAME_SIZE); ++ ir->l.code_length = 8; ++ ir->l.add_to_buf = add_to_buf_pixelview; ++ break; ++ case 0x4b: ++ strlcpy(ir->c.name, "PV951 IR", I2C_NAME_SIZE); ++ ir->l.code_length = 32; ++ ir->l.add_to_buf = add_to_buf_pv951; ++ break; ++ case 0x71: ++ if (adap->id == I2C_HW_B_CX2388x) ++ strlcpy(ir->c.name, "Hauppauge HVR1300", I2C_NAME_SIZE); ++ else /* bt8xx or cx2341x */ ++ /* ++ * The PVR150 IR receiver uses the same protocol as ++ * other Hauppauge cards, but the data flow is ++ * different, so we need to deal with it by its own. 
++ */ ++ strlcpy(ir->c.name, "Hauppauge PVR150", I2C_NAME_SIZE); ++ ir->l.code_length = 13; ++ ir->l.add_to_buf = add_to_buf_haup_pvr150; ++ break; ++ case 0x6b: ++ strlcpy(ir->c.name, "Adaptec IR", I2C_NAME_SIZE); ++ ir->l.code_length = 32; ++ ir->l.add_to_buf = add_to_buf_adap; ++ break; ++ case 0x18: ++ case 0x1a: ++ if (adap->id == I2C_HW_B_CX2388x) { ++ strlcpy(ir->c.name, "Leadtek IR", I2C_NAME_SIZE); ++ ir->l.code_length = 8; ++ ir->l.add_to_buf = add_to_buf_pvr2000; ++ } else { /* bt8xx or cx2341x */ ++ strlcpy(ir->c.name, "Hauppauge IR", I2C_NAME_SIZE); ++ ir->l.code_length = 13; ++ ir->l.add_to_buf = add_to_buf_haup; ++ } ++ break; ++ case 0x30: ++ strlcpy(ir->c.name, "KNC ONE IR", I2C_NAME_SIZE); ++ ir->l.code_length = 8; ++ ir->l.add_to_buf = add_to_buf_knc1; ++ break; ++ case 0x21: ++ case 0x23: ++ pcf_probe(client, ir); ++ strlcpy(ir->c.name, "TV-Box IR", I2C_NAME_SIZE); ++ ir->l.code_length = 8; ++ ir->l.add_to_buf = add_to_buf_pcf8574; ++ break; ++ default: ++ /* shouldn't happen */ ++ printk("lirc_i2c: Huh? 
unknown i2c address (0x%02x)?\n", addr); ++ kfree(ir); ++ return -EINVAL; ++ } ++ printk(KERN_INFO "lirc_i2c: chip 0x%x found @ 0x%02x (%s)\n", ++ adap->id, addr, ir->c.name); ++ ++ retval = lirc_register_driver(&ir->l); ++ ++ if (retval < 0) { ++ printk(KERN_ERR "lirc_i2c: failed to register driver!\n"); ++ kfree(ir); ++ return retval; ++ } ++ ++ ir->l.minor = retval; ++ ++ return 0; ++} ++ ++static int ir_remove(struct i2c_client *client) ++{ ++ struct IR *ir = i2c_get_clientdata(client); ++ ++ /* unregister device */ ++ lirc_unregister_driver(ir->l.minor); ++ ++ /* free memory */ ++ kfree(ir); ++ return 0; ++} ++ ++static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg) ++{ ++ /* nothing */ ++ return 0; ++} ++ ++static int __init lirc_i2c_init(void) ++{ ++ i2c_add_driver(&driver); ++ return 0; ++} ++ ++static void __exit lirc_i2c_exit(void) ++{ ++ i2c_del_driver(&driver); ++} ++ ++MODULE_DESCRIPTION("Infrared receiver driver for Hauppauge and " ++ "Pixelview cards (i2c stack)"); ++MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, " ++ "Ulrich Mueller, Stefan Jahn, Jerome Brock"); ++MODULE_LICENSE("GPL"); ++ ++module_param(minor, int, S_IRUGO); ++MODULE_PARM_DESC(minor, "Preferred minor device number"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++module_init(lirc_i2c_init); ++module_exit(lirc_i2c_exit); +diff --git a/drivers/input/lirc/lirc_igorplugusb.c b/drivers/input/lirc/lirc_igorplugusb.c +new file mode 100644 +index 0000000..599037d +--- /dev/null ++++ b/drivers/input/lirc/lirc_igorplugusb.c +@@ -0,0 +1,556 @@ ++/* ++ * lirc_igorplugusb - USB remote support for LIRC ++ * ++ * Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware. ++ * See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm ++ * ++ * The device can only record bursts of up to 36 pulses/spaces. ++ * Works fine with RC5. 
Longer commands lead to device buffer overrun. ++ * (Maybe a better firmware or a microcontroller with more ram can help?) ++ * ++ * Version 0.1 [beta status] ++ * ++ * Copyright (C) 2004 Jan M. Hochstein ++ * ++ * ++ * This driver was derived from: ++ * Paul Miller ++ * "lirc_atiusb" module ++ * Vladimir Dergachev 's 2002 ++ * "USB ATI Remote support" (input device) ++ * Adrian Dewhurst 's 2002 ++ * "USB StreamZap remote driver" (LIRC) ++ * Artur Lipowski 's 2002 ++ * "lirc_dev" and "lirc_gpio" LIRC modules ++ */ ++ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++ ++/* module identification */ ++#define DRIVER_VERSION "0.1" ++#define DRIVER_AUTHOR \ ++ "Jan M. Hochstein " ++#define DRIVER_DESC "USB remote driver for LIRC" ++#define DRIVER_NAME "lirc_igorplugusb" ++ ++/* debugging support */ ++#ifdef CONFIG_USB_DEBUG ++static int debug = 1; ++#else ++static int debug; ++#endif ++ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG fmt, ## args); \ ++ } while (0) ++ ++/* One mode2 pulse/space has 4 bytes. */ ++#define CODE_LENGTH sizeof(int) ++ ++/* Igor's firmware cannot record bursts longer than 36. 
*/ ++#define DEVICE_BUFLEN 36 ++ ++/* ++ * Header at the beginning of the device's buffer: ++ * unsigned char data_length ++ * unsigned char data_start (!=0 means ring-buffer overrun) ++ * unsigned char counter (incremented by each burst) ++ */ ++#define DEVICE_HEADERLEN 3 ++ ++/* This is for the gap */ ++#define ADDITIONAL_LIRC_BYTES 2 ++ ++/* times to poll per second */ ++#define SAMPLE_RATE 100 ++static int sample_rate = SAMPLE_RATE; ++ ++ ++/**** Igor's USB Request Codes */ ++ ++#define SET_INFRABUFFER_EMPTY 1 ++/** ++ * Params: none ++ * Answer: empty ++ */ ++ ++#define GET_INFRACODE 2 ++/** ++ * Params: ++ * wValue: offset to begin reading infra buffer ++ * ++ * Answer: infra data ++ */ ++ ++#define SET_DATAPORT_DIRECTION 3 ++/** ++ * Params: ++ * wValue: (byte) 1 bit for each data port pin (0=in, 1=out) ++ * ++ * Answer: empty ++ */ ++ ++#define GET_DATAPORT_DIRECTION 4 ++/** ++ * Params: none ++ * ++ * Answer: (byte) 1 bit for each data port pin (0=in, 1=out) ++ */ ++ ++#define SET_OUT_DATAPORT 5 ++/** ++ * Params: ++ * wValue: byte to write to output data port ++ * ++ * Answer: empty ++ */ ++ ++#define GET_OUT_DATAPORT 6 ++/** ++ * Params: none ++ * ++ * Answer: least significant 3 bits read from output data port ++ */ ++ ++#define GET_IN_DATAPORT 7 ++/** ++ * Params: none ++ * ++ * Answer: least significant 3 bits read from input data port ++ */ ++ ++#define READ_EEPROM 8 ++/** ++ * Params: ++ * wValue: offset to begin reading EEPROM ++ * ++ * Answer: EEPROM bytes ++ */ ++ ++#define WRITE_EEPROM 9 ++/** ++ * Params: ++ * wValue: offset to EEPROM byte ++ * wIndex: byte to write ++ * ++ * Answer: empty ++ */ ++ ++#define SEND_RS232 10 ++/** ++ * Params: ++ * wValue: byte to send ++ * ++ * Answer: empty ++ */ ++ ++#define RECV_RS232 11 ++/** ++ * Params: none ++ * ++ * Answer: byte received ++ */ ++ ++#define SET_RS232_BAUD 12 ++/** ++ * Params: ++ * wValue: byte to write to UART bit rate register (UBRR) ++ * ++ * Answer: empty ++ */ ++ ++#define 
GET_RS232_BAUD 13 ++/** ++ * Params: none ++ * ++ * Answer: byte read from UART bit rate register (UBRR) ++ */ ++ ++ ++/* data structure for each usb remote */ ++struct igorplug { ++ ++ /* usb */ ++ struct usb_device *usbdev; ++ struct urb *urb_in; ++ int devnum; ++ ++ unsigned char *buf_in; ++ unsigned int len_in; ++ int in_space; ++ struct timeval last_time; ++ ++ dma_addr_t dma_in; ++ ++ /* lirc */ ++ struct lirc_driver *d; ++ ++ /* handle sending (init strings) */ ++ int send_flags; ++ wait_queue_head_t wait_out; ++}; ++ ++static int unregister_from_lirc(struct igorplug *ir) ++{ ++ struct lirc_driver *d = ir->d; ++ int devnum; ++ ++ if (!ir->d) ++ return -EINVAL; ++ ++ devnum = ir->devnum; ++ dprintk(DRIVER_NAME "[%d]: unregister from lirc called\n", devnum); ++ ++ lirc_unregister_driver(d->minor); ++ ++ printk(DRIVER_NAME "[%d]: usb remote disconnected\n", devnum); ++ ++ kfree(d); ++ ir->d = NULL; ++ kfree(ir); ++ return 0; ++} ++ ++static int set_use_inc(void *data) ++{ ++ struct igorplug *ir = data; ++ ++ if (!ir) { ++ printk(DRIVER_NAME "[?]: set_use_inc called with no context\n"); ++ return -EIO; ++ } ++ dprintk(DRIVER_NAME "[%d]: set use inc\n", ir->devnum); ++ ++ if (!ir->usbdev) ++ return -ENODEV; ++ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct igorplug *ir = data; ++ ++ if (!ir) { ++ printk(DRIVER_NAME "[?]: set_use_dec called with no context\n"); ++ return; ++ } ++ dprintk(DRIVER_NAME "[%d]: set use dec\n", ir->devnum); ++} ++ ++ ++/** ++ * Called in user context. ++ * return 0 if data was added to the buffer and ++ * -ENODATA if none was available. This should add some number of bits ++ * evenly divisible by code_length to the buffer ++ */ ++static int usb_remote_poll(void *data, struct lirc_buffer *buf) ++{ ++ int ret; ++ struct igorplug *ir = (struct igorplug *)data; ++ ++ if (!ir->usbdev) /* Has the device been removed? 
*/ ++ return -ENODEV; ++ ++ memset(ir->buf_in, 0, ir->len_in); ++ ++ ret = usb_control_msg( ++ ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), ++ GET_INFRACODE, USB_TYPE_VENDOR|USB_DIR_IN, ++ 0/* offset */, /*unused*/0, ++ ir->buf_in, ir->len_in, ++ /*timeout*/HZ * USB_CTRL_GET_TIMEOUT); ++ if (ret > 0) { ++ int i = DEVICE_HEADERLEN; ++ int code, timediff; ++ struct timeval now; ++ ++ if (ret <= 1) /* ACK packet has 1 byte --> ignore */ ++ return -ENODATA; ++ ++ dprintk(DRIVER_NAME ": Got %d bytes. Header: %02x %02x %02x\n", ++ ret, ir->buf_in[0], ir->buf_in[1], ir->buf_in[2]); ++ ++ if (ir->buf_in[2] != 0) { ++ printk(DRIVER_NAME "[%d]: Device buffer overrun.\n", ++ ir->devnum); ++ /* start at earliest byte */ ++ i = DEVICE_HEADERLEN + ir->buf_in[2]; ++ /* where are we now? space, gap or pulse? */ ++ } ++ ++ do_gettimeofday(&now); ++ timediff = now.tv_sec - ir->last_time.tv_sec; ++ if (timediff + 1 > PULSE_MASK / 1000000) ++ timediff = PULSE_MASK; ++ else { ++ timediff *= 1000000; ++ timediff += now.tv_usec - ir->last_time.tv_usec; ++ } ++ ir->last_time.tv_sec = now.tv_sec; ++ ir->last_time.tv_usec = now.tv_usec; ++ ++ /* create leading gap */ ++ code = timediff; ++ lirc_buffer_write(buf, (unsigned char *)&code); ++ ir->in_space = 1; /* next comes a pulse */ ++ ++ /* MODE2: pulse/space (PULSE_BIT) in 1us units */ ++ ++ while (i < ret) { ++ /* 1 Igor-tick = 85.333333 us */ ++ code = (unsigned int)ir->buf_in[i] * 85 ++ + (unsigned int)ir->buf_in[i] / 3; ++ if (ir->in_space) ++ code |= PULSE_BIT; ++ lirc_buffer_write(buf, (unsigned char *)&code); ++ /* 1 chunk = CODE_LENGTH bytes */ ++ ir->in_space ^= 1; ++ ++i; ++ } ++ ++ ret = usb_control_msg( ++ ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), ++ SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN, ++ /*unused*/0, /*unused*/0, ++ /*dummy*/ir->buf_in, /*dummy*/ir->len_in, ++ /*timeout*/HZ * USB_CTRL_GET_TIMEOUT); ++ if (ret < 0) ++ printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: " ++ "error %d\n", ir->devnum, ret); ++ return 
0; ++ } else if (ret < 0) ++ printk(DRIVER_NAME "[%d]: GET_INFRACODE: error %d\n", ++ ir->devnum, ret); ++ ++ return -ENODATA; ++} ++ ++ ++ ++static int usb_remote_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = NULL; ++ struct usb_host_interface *idesc = NULL; ++ struct usb_host_endpoint *ep_ctl2; ++ struct igorplug *ir = NULL; ++ struct lirc_driver *driver = NULL; ++ int devnum, pipe, maxp; ++ int minor = 0; ++ char buf[63], name[128] = ""; ++ int mem_failure = 0; ++ int ret; ++ ++ dprintk(DRIVER_NAME ": usb probe called.\n"); ++ ++ dev = interface_to_usbdev(intf); ++ ++ idesc = intf->cur_altsetting; ++ ++ if (idesc->desc.bNumEndpoints != 1) ++ return -ENODEV; ++ ep_ctl2 = idesc->endpoint; ++ if (((ep_ctl2->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ != USB_DIR_IN) ++ || (ep_ctl2->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ != USB_ENDPOINT_XFER_CONTROL) ++ return -ENODEV; ++ pipe = usb_rcvctrlpipe(dev, ep_ctl2->desc.bEndpointAddress); ++ devnum = dev->devnum; ++ maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); ++ ++ dprintk(DRIVER_NAME "[%d]: bytes_in_key=%lu maxp=%d\n", ++ devnum, CODE_LENGTH, maxp); ++ ++ ++ mem_failure = 0; ++ ir = kzalloc(sizeof(struct igorplug), GFP_KERNEL); ++ if (!ir) { ++ mem_failure = 1; ++ goto mem_failure_switch; ++ } ++ driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); ++ if (!driver) { ++ mem_failure = 2; ++ goto mem_failure_switch; ++ } ++ ++ ir->buf_in = usb_buffer_alloc(dev, ++ DEVICE_BUFLEN+DEVICE_HEADERLEN, ++ GFP_ATOMIC, &ir->dma_in); ++ if (!ir->buf_in) { ++ mem_failure = 3; ++ goto mem_failure_switch; ++ } ++ ++ strcpy(driver->name, DRIVER_NAME " "); ++ driver->minor = -1; ++ driver->code_length = CODE_LENGTH * 8; /* in bits */ ++ driver->features = LIRC_CAN_REC_MODE2; ++ driver->data = ir; ++ driver->chunk_size = CODE_LENGTH; ++ driver->buffer_size = DEVICE_BUFLEN + ADDITIONAL_LIRC_BYTES; ++ driver->set_use_inc = &set_use_inc; ++ driver->set_use_dec 
= &set_use_dec; ++ driver->sample_rate = sample_rate; /* per second */ ++ driver->add_to_buf = &usb_remote_poll; ++ driver->dev = &intf->dev; ++ driver->owner = THIS_MODULE; ++ ++ init_waitqueue_head(&ir->wait_out); ++ ++ minor = lirc_register_driver(driver); ++ if (minor < 0) ++ mem_failure = 9; ++ ++mem_failure_switch: ++ ++ switch (mem_failure) { ++ case 9: ++ usb_buffer_free(dev, DEVICE_BUFLEN+DEVICE_HEADERLEN, ++ ir->buf_in, ir->dma_in); ++ case 3: ++ kfree(driver); ++ case 2: ++ kfree(ir); ++ case 1: ++ printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n", ++ devnum, mem_failure); ++ return -ENOMEM; ++ } ++ ++ driver->minor = minor; ++ ir->d = driver; ++ ir->devnum = devnum; ++ ir->usbdev = dev; ++ ir->len_in = DEVICE_BUFLEN+DEVICE_HEADERLEN; ++ ir->in_space = 1; /* First mode2 event is a space. */ ++ do_gettimeofday(&ir->last_time); ++ ++ if (dev->descriptor.iManufacturer ++ && usb_string(dev, dev->descriptor.iManufacturer, ++ buf, sizeof(buf)) > 0) ++ strlcpy(name, buf, sizeof(name)); ++ if (dev->descriptor.iProduct ++ && usb_string(dev, dev->descriptor.iProduct, buf, sizeof(buf)) > 0) ++ snprintf(name + strlen(name), sizeof(name) - strlen(name), ++ " %s", buf); ++ printk(DRIVER_NAME "[%d]: %s on usb%d:%d\n", devnum, name, ++ dev->bus->busnum, devnum); ++ ++ /* clear device buffer */ ++ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), ++ SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN, ++ /*unused*/0, /*unused*/0, ++ /*dummy*/ir->buf_in, /*dummy*/ir->len_in, ++ /*timeout*/HZ * USB_CTRL_GET_TIMEOUT); ++ if (ret < 0) ++ printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n", ++ devnum, ret); ++ ++ usb_set_intfdata(intf, ir); ++ return 0; ++} ++ ++ ++static void usb_remote_disconnect(struct usb_interface *intf) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct igorplug *ir = usb_get_intfdata(intf); ++ usb_set_intfdata(intf, NULL); ++ ++ if (!ir || !ir->d) ++ return; ++ ++ ir->usbdev = NULL; ++ 
wake_up_all(&ir->wait_out); ++ ++ usb_buffer_free(dev, ir->len_in, ir->buf_in, ir->dma_in); ++ ++ unregister_from_lirc(ir); ++} ++ ++static struct usb_device_id usb_remote_id_table[] = { ++ /* Igor Plug USB (Atmel's Manufact. ID) */ ++ { USB_DEVICE(0x03eb, 0x0002) }, ++ ++ /* Terminating entry */ ++ { } ++}; ++ ++static struct usb_driver usb_remote_driver = { ++ .name = DRIVER_NAME, ++ .probe = usb_remote_probe, ++ .disconnect = usb_remote_disconnect, ++ .id_table = usb_remote_id_table ++}; ++ ++static int __init usb_remote_init(void) ++{ ++ int i; ++ ++ printk(KERN_INFO "\n" ++ DRIVER_NAME ": " DRIVER_DESC " v" DRIVER_VERSION "\n"); ++ printk(DRIVER_NAME ": " DRIVER_AUTHOR "\n"); ++ dprintk(DRIVER_NAME ": debug mode enabled\n"); ++ ++ i = usb_register(&usb_remote_driver); ++ if (i < 0) { ++ printk(DRIVER_NAME ": usb register failed, result = %d\n", i); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static void __exit usb_remote_exit(void) ++{ ++ usb_deregister(&usb_remote_driver); ++} ++ ++module_init(usb_remote_init); ++module_exit(usb_remote_exit); ++ ++#include ++MODULE_INFO(vermagic, VERMAGIC_STRING); ++ ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(usb, usb_remote_id_table); ++ ++module_param(sample_rate, int, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(sample_rate, "Sampling rate in Hz (default: 100)"); ++ +diff --git a/drivers/input/lirc/lirc_imon.c b/drivers/input/lirc/lirc_imon.c +new file mode 100644 +index 0000000..5bea43b +--- /dev/null ++++ b/drivers/input/lirc/lirc_imon.c +@@ -0,0 +1,1054 @@ ++/* ++ * lirc_imon.c: LIRC/VFD/LCD driver for SoundGraph iMON IR/VFD/LCD ++ * including the iMON PAD model ++ * ++ * Copyright(C) 2004 Venky Raju(dev@venky.ws) ++ * Copyright(C) 2009 Jarod Wilson ++ * ++ * lirc_imon is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of 
the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++ ++#define MOD_AUTHOR "Venky Raju " ++#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" ++#define MOD_NAME "lirc_imon" ++#define MOD_VERSION "0.8" ++ ++#define DISPLAY_MINOR_BASE 144 ++#define DEVICE_NAME "lcd%d" ++ ++#define BUF_CHUNK_SIZE 4 ++#define BUF_SIZE 128 ++ ++#define BIT_DURATION 250 /* each bit received is 250us */ ++ ++/*** P R O T O T Y P E S ***/ ++ ++/* USB Callback prototypes */ ++static int imon_probe(struct usb_interface *interface, ++ const struct usb_device_id *id); ++static void imon_disconnect(struct usb_interface *interface); ++static void usb_rx_callback(struct urb *urb); ++static void usb_tx_callback(struct urb *urb); ++ ++/* suspend/resume support */ ++static int imon_resume(struct usb_interface *intf); ++static int imon_suspend(struct usb_interface *intf, pm_message_t message); ++ ++/* Display file_operations function prototypes */ ++static int display_open(struct inode *inode, struct file *file); ++static int display_close(struct inode *inode, struct file *file); ++ ++/* VFD write operation */ ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos); ++ ++/* LIRC driver function prototypes */ ++static int ir_open(void *data); ++static void ir_close(void *data); ++ ++/* Driver init/exit prototypes */ ++static int __init imon_init(void); ++static 
void __exit imon_exit(void); ++ ++/*** G L O B A L S ***/ ++ ++struct imon_context { ++ struct usb_device *usbdev; ++ /* Newer devices have two interfaces */ ++ int display; /* not all controllers do */ ++ int display_isopen; /* display port has been opened */ ++ int ir_isopen; /* IR port open */ ++ int dev_present; /* USB device presence */ ++ struct mutex ctx_lock; /* to lock this object */ ++ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ ++ ++ int vfd_proto_6p; /* some VFD require a 6th packet */ ++ ++ struct lirc_driver *driver; ++ struct usb_endpoint_descriptor *rx_endpoint; ++ struct usb_endpoint_descriptor *tx_endpoint; ++ struct urb *rx_urb; ++ struct urb *tx_urb; ++ unsigned char usb_rx_buf[8]; ++ unsigned char usb_tx_buf[8]; ++ ++ struct rx_data { ++ int count; /* length of 0 or 1 sequence */ ++ int prev_bit; /* logic level of sequence */ ++ int initial_space; /* initial space flag */ ++ } rx; ++ ++ struct tx_t { ++ unsigned char data_buf[35]; /* user data buffer */ ++ struct completion finished; /* wait for write to finish */ ++ atomic_t busy; /* write in progress */ ++ int status; /* status of tx completion */ ++ } tx; ++}; ++ ++static struct file_operations display_fops = { ++ .owner = THIS_MODULE, ++ .open = &display_open, ++ .write = &vfd_write, ++ .release = &display_close ++}; ++ ++/* ++ * USB Device ID for iMON USB Control Boards ++ * ++ * The Windows drivers contain 6 different inf files, more or less one for ++ * each new device until the 0x0034-0x0046 devices, which all use the same ++ * driver. Some of the devices in the 34-46 range haven't been definitively ++ * identified yet. Early devices have either a TriGem Computer, Inc. or a ++ * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later ++ * devices use the SoundGraph vendor ID (0x15c2). 
++ */ ++static struct usb_device_id imon_usb_id_table[] = { ++ /* TriGem iMON (IR only) -- TG_iMON.inf */ ++ { USB_DEVICE(0x0aa8, 0x8001) }, ++ ++ /* SoundGraph iMON (IR only) -- sg_imon.inf */ ++ { USB_DEVICE(0x04e8, 0xff30) }, ++ ++ /* SoundGraph iMON VFD (IR & VFD) -- iMON_VFD.inf */ ++ { USB_DEVICE(0x0aa8, 0xffda) }, ++ ++ /* SoundGraph iMON SS (IR & VFD) -- iMON_SS.inf */ ++ { USB_DEVICE(0x15c2, 0xffda) }, ++ ++ {} ++}; ++ ++/* Some iMON VFD models requires a 6th packet for VFD writes */ ++static struct usb_device_id vfd_proto_6p_list[] = { ++ { USB_DEVICE(0x15c2, 0xffda) }, ++ {} ++}; ++ ++/* Some iMON devices have no lcd/vfd, don't set one up */ ++static struct usb_device_id ir_only_list[] = { ++ { USB_DEVICE(0x0aa8, 0x8001) }, ++ { USB_DEVICE(0x04e8, 0xff30) }, ++ {} ++}; ++ ++/* USB Device data */ ++static struct usb_driver imon_driver = { ++ .name = MOD_NAME, ++ .probe = imon_probe, ++ .disconnect = imon_disconnect, ++ .suspend = imon_suspend, ++ .resume = imon_resume, ++ .id_table = imon_usb_id_table, ++}; ++ ++static struct usb_class_driver imon_class = { ++ .name = DEVICE_NAME, ++ .fops = &display_fops, ++ .minor_base = DISPLAY_MINOR_BASE, ++}; ++ ++/* to prevent races between open() and disconnect(), probing, etc */ ++static DEFINE_MUTEX(driver_lock); ++ ++static int debug; ++ ++/*** M O D U L E C O D E ***/ ++ ++MODULE_AUTHOR(MOD_AUTHOR); ++MODULE_DESCRIPTION(MOD_DESC); ++MODULE_VERSION(MOD_VERSION); ++MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(usb, imon_usb_id_table); ++module_param(debug, int, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)"); ++ ++static void free_imon_context(struct imon_context *context) ++{ ++ usb_free_urb(context->tx_urb); ++ usb_free_urb(context->rx_urb); ++ lirc_buffer_free(context->driver->rbuf); ++ kfree(context->driver->rbuf); ++ kfree(context->driver); ++ kfree(context); ++ ++ dev_dbg(context->driver->dev, "%s: iMON context freed\n", __func__); ++} ++ ++static void 
deregister_from_lirc(struct imon_context *context) ++{ ++ int retval; ++ int minor = context->driver->minor; ++ ++ retval = lirc_unregister_driver(minor); ++ if (retval) ++ err("%s: unable to deregister from lirc(%d)", ++ __func__, retval); ++ else ++ printk(KERN_INFO MOD_NAME ": Deregistered iMON driver " ++ "(minor:%d)\n", minor); ++ ++} ++ ++/** ++ * Called when the Display device (e.g. /dev/lcd0) ++ * is opened by the application. ++ */ ++static int display_open(struct inode *inode, struct file *file) ++{ ++ struct usb_interface *interface; ++ struct imon_context *context = NULL; ++ int subminor; ++ int retval = 0; ++ ++ /* prevent races with disconnect */ ++ mutex_lock(&driver_lock); ++ ++ subminor = iminor(inode); ++ interface = usb_find_interface(&imon_driver, subminor); ++ if (!interface) { ++ err("%s: could not find interface for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ context = usb_get_intfdata(interface); ++ ++ if (!context) { ++ err("%s: no context found for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ if (!context->display) { ++ err("%s: display not supported by device", __func__); ++ retval = -ENODEV; ++ } else if (context->display_isopen) { ++ err("%s: display port is already open", __func__); ++ retval = -EBUSY; ++ } else { ++ context->display_isopen = 1; ++ file->private_data = context; ++ dev_info(context->driver->dev, "display port opened\n"); ++ } ++ ++ mutex_unlock(&context->ctx_lock); ++ ++exit: ++ mutex_unlock(&driver_lock); ++ return retval; ++} ++ ++/** ++ * Called when the display device (e.g. /dev/lcd0) ++ * is closed by the application. 
++ */ ++static int display_close(struct inode *inode, struct file *file) ++{ ++ struct imon_context *context = NULL; ++ int retval = 0; ++ ++ context = (struct imon_context *)file->private_data; ++ ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ if (!context->display) { ++ err("%s: display not supported by device", __func__); ++ retval = -ENODEV; ++ } else if (!context->display_isopen) { ++ err("%s: display is not open", __func__); ++ retval = -EIO; ++ } else { ++ context->display_isopen = 0; ++ dev_info(context->driver->dev, "display port closed\n"); ++ if (!context->dev_present && !context->ir_isopen) { ++ /* ++ * Device disconnected before close and IR port is not ++ * open. If IR port is open, context will be deleted by ++ * ir_close. ++ */ ++ mutex_unlock(&context->ctx_lock); ++ free_imon_context(context); ++ return retval; ++ } ++ } ++ ++ mutex_unlock(&context->ctx_lock); ++ return retval; ++} ++ ++/** ++ * Sends a packet to the device -- this function must be called ++ * with context->ctx_lock held. 
++ */ ++static int send_packet(struct imon_context *context) ++{ ++ unsigned int pipe; ++ int interval = 0; ++ int retval = 0; ++ struct usb_ctrlrequest *control_req = NULL; ++ ++ /* Check if we need to use control or interrupt urb */ ++ pipe = usb_sndintpipe(context->usbdev, ++ context->tx_endpoint->bEndpointAddress); ++ interval = context->tx_endpoint->bInterval; ++ ++ usb_fill_int_urb(context->tx_urb, context->usbdev, pipe, ++ context->usb_tx_buf, ++ sizeof(context->usb_tx_buf), ++ usb_tx_callback, context, interval); ++ ++ context->tx_urb->actual_length = 0; ++ ++ init_completion(&context->tx.finished); ++ atomic_set(&(context->tx.busy), 1); ++ ++ retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); ++ if (retval) { ++ atomic_set(&(context->tx.busy), 0); ++ err("%s: error submitting urb(%d)", __func__, retval); ++ } else { ++ /* Wait for transmission to complete (or abort) */ ++ mutex_unlock(&context->ctx_lock); ++ retval = wait_for_completion_interruptible( ++ &context->tx.finished); ++ if (retval) ++ err("%s: task interrupted", __func__); ++ mutex_lock(&context->ctx_lock); ++ ++ retval = context->tx.status; ++ if (retval) ++ err("%s: packet tx failed (%d)", __func__, retval); ++ } ++ ++ kfree(control_req); ++ ++ return retval; ++} ++ ++/** ++ * Writes data to the VFD. The iMON VFD is 2x16 characters ++ * and requires data in 5 consecutive USB interrupt packets, ++ * each packet but the last carrying 7 bytes. ++ * ++ * I don't know if the VFD board supports features such as ++ * scrolling, clearing rows, blanking, etc. so at ++ * the caller must provide a full screen of data. If fewer ++ * than 32 bytes are provided spaces will be appended to ++ * generate a full screen. 
++ */ ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos) ++{ ++ int i; ++ int offset; ++ int seq; ++ int retval = 0; ++ struct imon_context *context; ++ const unsigned char vfd_packet6[] = { ++ 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; ++ ++ context = (struct imon_context *)file->private_data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ if (!context->dev_present) { ++ err("%s: no iMON device present", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ if (n_bytes <= 0 || n_bytes > 32) { ++ err("%s: invalid payload size", __func__); ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ if (copy_from_user(context->tx.data_buf, buf, n_bytes)) { ++ retval = -EFAULT; ++ goto exit; ++ } ++ ++ /* Pad with spaces */ ++ for (i = n_bytes; i < 32; ++i) ++ context->tx.data_buf[i] = ' '; ++ ++ for (i = 32; i < 35; ++i) ++ context->tx.data_buf[i] = 0xFF; ++ ++ offset = 0; ++ seq = 0; ++ ++ do { ++ memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7); ++ context->usb_tx_buf[7] = (unsigned char) seq; ++ ++ retval = send_packet(context); ++ if (retval) { ++ err("%s: send packet failed for packet #%d", ++ __func__, seq/2); ++ goto exit; ++ } else { ++ seq += 2; ++ offset += 7; ++ } ++ ++ } while (offset < 35); ++ ++ if (context->vfd_proto_6p) { ++ /* Send packet #6 */ ++ memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); ++ context->usb_tx_buf[7] = (unsigned char) seq; ++ retval = send_packet(context); ++ if (retval) ++ err("%s: send packet failed for packet #%d", ++ __func__, seq/2); ++ } ++ ++exit: ++ mutex_unlock(&context->ctx_lock); ++ ++ return (!retval) ? 
n_bytes : retval; ++} ++ ++/** ++ * Callback function for USB core API: transmit data ++ */ ++static void usb_tx_callback(struct urb *urb) ++{ ++ struct imon_context *context; ++ ++ if (!urb) ++ return; ++ context = (struct imon_context *)urb->context; ++ if (!context) ++ return; ++ ++ context->tx.status = urb->status; ++ ++ /* notify waiters that write has finished */ ++ atomic_set(&context->tx.busy, 0); ++ complete(&context->tx.finished); ++ ++ return; ++} ++ ++/** ++ * Called by lirc_dev when the application opens /dev/lirc ++ */ ++static int ir_open(void *data) ++{ ++ int retval = 0; ++ struct imon_context *context; ++ ++ /* prevent races with disconnect */ ++ mutex_lock(&driver_lock); ++ ++ context = (struct imon_context *)data; ++ ++ /* initial IR protocol decode variables */ ++ context->rx.count = 0; ++ context->rx.initial_space = 1; ++ context->rx.prev_bit = 0; ++ ++ context->ir_isopen = 1; ++ dev_info(context->driver->dev, "IR port opened\n"); ++ ++ mutex_unlock(&driver_lock); ++ return retval; ++} ++ ++/** ++ * Called by lirc_dev when the application closes /dev/lirc ++ */ ++static void ir_close(void *data) ++{ ++ struct imon_context *context; ++ ++ context = (struct imon_context *)data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ context->ir_isopen = 0; ++ dev_info(context->driver->dev, "IR port closed\n"); ++ ++ if (!context->dev_present) { ++ /* ++ * Device disconnected while IR port was still open. Driver ++ * was not deregistered at disconnect time, so do it now. ++ */ ++ deregister_from_lirc(context); ++ ++ if (!context->display_isopen) { ++ mutex_unlock(&context->ctx_lock); ++ free_imon_context(context); ++ return; ++ } ++ /* ++ * If display port is open, context will be deleted by ++ * display_close ++ */ ++ } ++ ++ mutex_unlock(&context->ctx_lock); ++ return; ++} ++ ++/** ++ * Convert bit count to time duration (in us) and submit ++ * the value to lirc_dev. 
++ */ ++static void submit_data(struct imon_context *context) ++{ ++ unsigned char buf[4]; ++ int value = context->rx.count; ++ int i; ++ ++ dev_dbg(context->driver->dev, "submitting data to LIRC\n"); ++ ++ value *= BIT_DURATION; ++ value &= PULSE_MASK; ++ if (context->rx.prev_bit) ++ value |= PULSE_BIT; ++ ++ for (i = 0; i < 4; ++i) ++ buf[i] = value>>(i*8); ++ ++ lirc_buffer_write(context->driver->rbuf, buf); ++ wake_up(&context->driver->rbuf->wait_poll); ++ return; ++} ++ ++static inline int tv2int(const struct timeval *a, const struct timeval *b) ++{ ++ int usecs = 0; ++ int sec = 0; ++ ++ if (b->tv_usec > a->tv_usec) { ++ usecs = 1000000; ++ sec--; ++ } ++ ++ usecs += a->tv_usec - b->tv_usec; ++ ++ sec += a->tv_sec - b->tv_sec; ++ sec *= 1000; ++ usecs /= 1000; ++ sec += usecs; ++ ++ if (sec < 0) ++ sec = 1000; ++ ++ return sec; ++} ++ ++/** ++ * Process the incoming packet ++ */ ++static void imon_incoming_packet(struct imon_context *context, ++ struct urb *urb, int intf) ++{ ++ int len = urb->actual_length; ++ unsigned char *buf = urb->transfer_buffer; ++ struct device *dev = context->driver->dev; ++ int octet, bit; ++ unsigned char mask; ++ int i, chunk_num; ++ ++ /* ++ * just bail out if no listening IR client ++ */ ++ if (!context->ir_isopen) ++ return; ++ ++ if (len != 8) { ++ dev_warn(dev, "imon %s: invalid incoming packet " ++ "size (len = %d, intf%d)\n", __func__, len, intf); ++ return; ++ } ++ ++ if (debug) { ++ printk(KERN_INFO "raw packet: "); ++ for (i = 0; i < len; ++i) ++ printk("%02x ", buf[i]); ++ printk("\n"); ++ } ++ ++ /* ++ * Translate received data to pulse and space lengths. ++ * Received data is active low, i.e. pulses are 0 and ++ * spaces are 1. ++ * ++ * My original algorithm was essentially similar to ++ * Changwoo Ryu's with the exception that he switched ++ * the incoming bits to active high and also fed an ++ * initial space to LIRC at the start of a new sequence ++ * if the previous bit was a pulse. 
++ * ++ * I've decided to adopt his algorithm. ++ */ ++ ++ if (buf[7] == 1 && context->rx.initial_space) { ++ /* LIRC requires a leading space */ ++ context->rx.prev_bit = 0; ++ context->rx.count = 4; ++ submit_data(context); ++ context->rx.count = 0; ++ } ++ ++ for (octet = 0; octet < 5; ++octet) { ++ mask = 0x80; ++ for (bit = 0; bit < 8; ++bit) { ++ int curr_bit = !(buf[octet] & mask); ++ if (curr_bit != context->rx.prev_bit) { ++ if (context->rx.count) { ++ submit_data(context); ++ context->rx.count = 0; ++ } ++ context->rx.prev_bit = curr_bit; ++ } ++ ++context->rx.count; ++ mask >>= 1; ++ } ++ } ++ ++ if (chunk_num == 10) { ++ if (context->rx.count) { ++ submit_data(context); ++ context->rx.count = 0; ++ } ++ context->rx.initial_space = context->rx.prev_bit; ++ } ++} ++ ++/** ++ * Callback function for USB core API: receive data ++ */ ++static void usb_rx_callback(struct urb *urb) ++{ ++ struct imon_context *context; ++ unsigned char *buf; ++ int len; ++ int intfnum = 0; ++ ++ if (!urb) ++ return; ++ ++ context = (struct imon_context *)urb->context; ++ if (!context) ++ return; ++ ++ buf = urb->transfer_buffer; ++ len = urb->actual_length; ++ ++ switch (urb->status) { ++ case -ENOENT: /* usbcore unlink successful! 
*/ ++ return; ++ ++ case 0: ++ imon_incoming_packet(context, urb, intfnum); ++ break; ++ ++ default: ++ dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n", ++ __func__, urb->status); ++ break; ++ } ++ ++ usb_submit_urb(context->rx_urb, GFP_ATOMIC); ++ ++ return; ++} ++ ++/** ++ * Callback function for USB core API: Probe ++ */ ++static int imon_probe(struct usb_interface *interface, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *usbdev = NULL; ++ struct usb_host_interface *iface_desc = NULL; ++ struct usb_endpoint_descriptor *rx_endpoint = NULL; ++ struct usb_endpoint_descriptor *tx_endpoint = NULL; ++ struct urb *rx_urb = NULL; ++ struct urb *tx_urb = NULL; ++ struct lirc_driver *driver = NULL; ++ struct lirc_buffer *rbuf = NULL; ++ struct usb_interface *first_if; ++ struct device *dev = &interface->dev; ++ int ifnum; ++ int lirc_minor = 0; ++ int num_endpts; ++ int retval = 0; ++ int display_ep_found = 0; ++ int ir_ep_found = 0; ++ int alloc_status = 0; ++ int vfd_proto_6p = 0; ++ int code_length; ++ struct imon_context *context = NULL; ++ struct imon_context *first_if_context = NULL; ++ int i; ++ u16 vendor, product; ++ ++ /* ++ * Try to auto-detect the type of display if the user hasn't set ++ * it by hand via the display_type modparam. Default is VFD. 
++ */ ++ if (usb_match_id(interface, ir_only_list)) ++ context->display = 0; ++ else ++ context->display = 1; ++ ++ code_length = BUF_CHUNK_SIZE * 8; ++ ++ usbdev = usb_get_dev(interface_to_usbdev(interface)); ++ iface_desc = interface->cur_altsetting; ++ num_endpts = iface_desc->desc.bNumEndpoints; ++ ifnum = iface_desc->desc.bInterfaceNumber; ++ vendor = le16_to_cpu(usbdev->descriptor.idVendor); ++ product = le16_to_cpu(usbdev->descriptor.idProduct); ++ ++ dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n", ++ __func__, vendor, product, ifnum); ++ ++ /* prevent races probing devices w/multiple interfaces */ ++ mutex_lock(&driver_lock); ++ ++ first_if = usb_ifnum_to_if(usbdev, 0); ++ first_if_context = (struct imon_context *)usb_get_intfdata(first_if); ++ ++ /* ++ * Scan the endpoint list and set: ++ * first input endpoint = IR endpoint ++ * first output endpoint = display endpoint ++ */ ++ for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) { ++ struct usb_endpoint_descriptor *ep; ++ int ep_dir; ++ int ep_type; ++ ep = &iface_desc->endpoint[i].desc; ++ ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; ++ ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ ++ if (!ir_ep_found && ++ ep_dir == USB_DIR_IN && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ ++ rx_endpoint = ep; ++ ir_ep_found = 1; ++ dev_dbg(dev, "%s: found IR endpoint\n", __func__); ++ ++ } else if (!display_ep_found && ep_dir == USB_DIR_OUT && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ tx_endpoint = ep; ++ display_ep_found = 1; ++ dev_dbg(dev, "%s: found display endpoint\n", __func__); ++ } ++ } ++ ++ /* ++ * Some iMON receivers have no display. Unfortunately, it seems ++ * that SoundGraph recycles device IDs between devices both with ++ * and without... 
:\ ++ */ ++ if (context->display == 0) { ++ display_ep_found = 0; ++ dev_dbg(dev, "%s: device has no display\n", __func__); ++ } ++ ++ /* Input endpoint is mandatory */ ++ if (!ir_ep_found) { ++ err("%s: no valid input (IR) endpoint found.", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ /* Determine if display requires 6 packets */ ++ if (display_ep_found) { ++ if (usb_match_id(interface, vfd_proto_6p_list)) ++ vfd_proto_6p = 1; ++ ++ dev_dbg(dev, "%s: vfd_proto_6p: %d\n", ++ __func__, vfd_proto_6p); ++ } ++ ++ context = kzalloc(sizeof(struct imon_context), GFP_KERNEL); ++ if (!context) { ++ err("%s: kzalloc failed for context", __func__); ++ alloc_status = 1; ++ goto alloc_status_switch; ++ } ++ driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); ++ if (!driver) { ++ err("%s: kzalloc failed for lirc_driver", __func__); ++ alloc_status = 2; ++ goto alloc_status_switch; ++ } ++ rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!rbuf) { ++ err("%s: kmalloc failed for lirc_buffer", __func__); ++ alloc_status = 3; ++ goto alloc_status_switch; ++ } ++ if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) { ++ err("%s: lirc_buffer_init failed", __func__); ++ alloc_status = 4; ++ goto alloc_status_switch; ++ } ++ rx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!rx_urb) { ++ err("%s: usb_alloc_urb failed for IR urb", __func__); ++ alloc_status = 5; ++ goto alloc_status_switch; ++ } ++ tx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!tx_urb) { ++ err("%s: usb_alloc_urb failed for display urb", ++ __func__); ++ alloc_status = 6; ++ goto alloc_status_switch; ++ } ++ ++ mutex_init(&context->ctx_lock); ++ context->vfd_proto_6p = vfd_proto_6p; ++ ++ strcpy(driver->name, MOD_NAME); ++ driver->minor = -1; ++ driver->code_length = sizeof(int) * 8; ++ driver->sample_rate = 0; ++ driver->features = LIRC_CAN_REC_MODE2; ++ driver->data = context; ++ driver->rbuf = rbuf; ++ driver->set_use_inc = ir_open; ++ driver->set_use_dec = ir_close; ++ driver->dev = 
&interface->dev; ++ driver->owner = THIS_MODULE; ++ ++ mutex_lock(&context->ctx_lock); ++ ++ context->driver = driver; ++ /* start out in keyboard mode */ ++ ++ lirc_minor = lirc_register_driver(driver); ++ if (lirc_minor < 0) { ++ err("%s: lirc_register_driver failed", __func__); ++ alloc_status = 7; ++ goto alloc_status_switch; ++ } else ++ dev_info(dev, "Registered iMON driver " ++ "(lirc minor: %d)\n", lirc_minor); ++ ++ /* Needed while unregistering! */ ++ driver->minor = lirc_minor; ++ ++ context->usbdev = usbdev; ++ context->dev_present = 1; ++ context->rx_endpoint = rx_endpoint; ++ context->rx_urb = rx_urb; ++ ++ /* ++ * tx is used to send characters to lcd/vfd, associate RF ++ * remotes, set IR protocol, and maybe more... ++ */ ++ context->tx_endpoint = tx_endpoint; ++ context->tx_urb = tx_urb; ++ ++ if (display_ep_found) ++ context->display = 1; ++ ++ usb_fill_int_urb(context->rx_urb, context->usbdev, ++ usb_rcvintpipe(context->usbdev, ++ context->rx_endpoint->bEndpointAddress), ++ context->usb_rx_buf, sizeof(context->usb_rx_buf), ++ usb_rx_callback, context, ++ context->rx_endpoint->bInterval); ++ ++ retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); ++ ++ if (retval) { ++ err("%s: usb_submit_urb failed for intf0 (%d)", ++ __func__, retval); ++ mutex_unlock(&context->ctx_lock); ++ goto exit; ++ } ++ ++ usb_set_intfdata(interface, context); ++ ++ if (context->display && ifnum == 0) { ++ dev_dbg(dev, "%s: Registering iMON display with sysfs\n", ++ __func__); ++ ++ if (usb_register_dev(interface, &imon_class)) { ++ /* Not a fatal error, so ignore */ ++ dev_info(dev, "%s: could not get a minor number for " ++ "display\n", __func__); ++ } ++ } ++ ++ dev_info(dev, "iMON device (%04x:%04x, intf%d) on " ++ "usb<%d:%d> initialized\n", vendor, product, ifnum, ++ usbdev->bus->busnum, usbdev->devnum); ++ ++alloc_status_switch: ++ mutex_unlock(&context->ctx_lock); ++ ++ switch (alloc_status) { ++ case 7: ++ usb_free_urb(tx_urb); ++ case 6: ++ usb_free_urb(rx_urb); 
++ case 5: ++ if (rbuf) ++ lirc_buffer_free(rbuf); ++ case 4: ++ kfree(rbuf); ++ case 3: ++ kfree(driver); ++ case 2: ++ kfree(context); ++ context = NULL; ++ case 1: ++ retval = -ENOMEM; ++ break; ++ case 0: ++ retval = 0; ++ } ++ ++exit: ++ mutex_unlock(&driver_lock); ++ ++ return retval; ++} ++ ++/** ++ * Callback function for USB core API: disconnect ++ */ ++static void imon_disconnect(struct usb_interface *interface) ++{ ++ struct imon_context *context; ++ int ifnum; ++ ++ /* prevent races with ir_open()/display_open() */ ++ mutex_lock(&driver_lock); ++ ++ context = usb_get_intfdata(interface); ++ ifnum = interface->cur_altsetting->desc.bInterfaceNumber; ++ ++ mutex_lock(&context->ctx_lock); ++ ++ usb_set_intfdata(interface, NULL); ++ ++ /* Abort ongoing write */ ++ if (atomic_read(&context->tx.busy)) { ++ usb_kill_urb(context->tx_urb); ++ complete_all(&context->tx.finished); ++ } ++ ++ context->dev_present = 0; ++ usb_kill_urb(context->rx_urb); ++ if (context->display) ++ usb_deregister_dev(interface, &imon_class); ++ ++ if (!context->ir_isopen && !context->dev_present) { ++ deregister_from_lirc(context); ++ mutex_unlock(&context->ctx_lock); ++ if (!context->display_isopen) ++ free_imon_context(context); ++ } else ++ mutex_unlock(&context->ctx_lock); ++ ++ mutex_unlock(&driver_lock); ++ ++ printk(KERN_INFO "%s: iMON device (intf%d) disconnected\n", ++ __func__, ifnum); ++} ++ ++static int imon_suspend(struct usb_interface *intf, pm_message_t message) ++{ ++ struct imon_context *context = usb_get_intfdata(intf); ++ ++ usb_kill_urb(context->rx_urb); ++ ++ return 0; ++} ++ ++static int imon_resume(struct usb_interface *intf) ++{ ++ int rc = 0; ++ struct imon_context *context = usb_get_intfdata(intf); ++ ++ usb_fill_int_urb(context->rx_urb, context->usbdev, ++ usb_rcvintpipe(context->usbdev, ++ context->rx_endpoint->bEndpointAddress), ++ context->usb_rx_buf, sizeof(context->usb_rx_buf), ++ usb_rx_callback, context, ++ context->rx_endpoint->bInterval); ++ ++ rc = 
usb_submit_urb(context->rx_urb, GFP_ATOMIC); ++ ++ return rc; ++} ++ ++static int __init imon_init(void) ++{ ++ int rc; ++ ++ printk(KERN_INFO MOD_NAME ": " MOD_DESC ", v" MOD_VERSION "\n"); ++ ++ rc = usb_register(&imon_driver); ++ if (rc) { ++ err("%s: usb register failed(%d)", __func__, rc); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static void __exit imon_exit(void) ++{ ++ usb_deregister(&imon_driver); ++ printk(KERN_INFO MOD_NAME ": module removed. Goodbye!\n"); ++} ++ ++module_init(imon_init); ++module_exit(imon_exit); +diff --git a/drivers/input/lirc/lirc_it87.c b/drivers/input/lirc/lirc_it87.c +new file mode 100644 +index 0000000..c69662d +--- /dev/null ++++ b/drivers/input/lirc/lirc_it87.c +@@ -0,0 +1,991 @@ ++/* ++ * LIRC driver for ITE IT8712/IT8705 CIR port ++ * ++ * Copyright (C) 2001 Hans-Gunter Lutke Uphues ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 ++ * USA ++ * ++ * ITE IT8705 and IT8712(not tested) and IT8720 CIR-port support for lirc based ++ * via cut and paste from lirc_sir.c (C) 2000 Milan Pikula ++ * ++ * Attention: Sendmode only tested with debugging logs ++ * ++ * 2001/02/27 Christoph Bartelmus : ++ * reimplemented read function ++ * 2005/06/05 Andrew Calkin implemented support for Asus Digimatrix, ++ * based on work of the following member of the Outertrack Digimatrix ++ * Forum: Art103 ++ * 2009/12/24 James Edwards implemeted support ++ * for ITE8704/ITE8718, on my machine, the DSDT reports 8704, but the ++ * chip identifies as 18. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++#include "lirc_it87.h" ++ ++#ifdef LIRC_IT87_DIGIMATRIX ++static int digimatrix = 1; ++static int it87_freq = 36; /* kHz */ ++static int irq = 9; ++#else ++static int digimatrix; ++static int it87_freq = 38; /* kHz */ ++static int irq = IT87_CIR_DEFAULT_IRQ; ++#endif ++ ++static unsigned long it87_bits_in_byte_out; ++static unsigned long it87_send_counter; ++static unsigned char it87_RXEN_mask = IT87_CIR_RCR_RXEN; ++ ++#define RBUF_LEN 1024 ++#define WBUF_LEN 1024 ++ ++#define LIRC_DRIVER_NAME "lirc_it87" ++ ++/* timeout for sequences in jiffies (=5/100s) */ ++/* must be longer than TIME_CONST */ ++#define IT87_TIMEOUT (HZ*5/100) ++ ++/* module parameters */ ++static int debug; ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \ ++ fmt, ## args); \ ++ } while (0) ++ ++static int io = IT87_CIR_DEFAULT_IOBASE; ++/* receiver demodulator default: off */ ++static int it87_enable_demodulator; ++ ++static int timer_enabled; ++static DEFINE_SPINLOCK(timer_lock); ++static struct timer_list timerlist; ++/* time of last signal change detected */ ++static struct timeval last_tv = {0, 0}; ++/* time of last UART data ready interrupt */ ++static struct timeval last_intr_tv = {0, 0}; ++static int last_value; ++ ++static DECLARE_WAIT_QUEUE_HEAD(lirc_read_queue); ++ ++static DEFINE_SPINLOCK(hardware_lock); ++static DEFINE_SPINLOCK(dev_lock); ++ ++static int rx_buf[RBUF_LEN]; ++unsigned int rx_tail, rx_head; ++static int tx_buf[WBUF_LEN]; ++ ++/* SECTION: Prototypes */ ++ ++/* Communication with user-space */ ++static int lirc_open(struct inode *inode, struct file *file); ++static int lirc_close(struct inode *inode, struct file *file); ++static unsigned int lirc_poll(struct file *file, poll_table *wait); ++static ssize_t lirc_read(struct file *file, char *buf, ++ size_t count, loff_t *ppos); ++static ssize_t lirc_write(struct file *file, const char *buf, ++ size_t n, loff_t *pos); ++static int lirc_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg); ++static void add_read_queue(int flag, unsigned long val); ++static int init_chrdev(void); ++static void drop_chrdev(void); ++/* Hardware */ ++static irqreturn_t it87_interrupt(int irq, void *dev_id); ++static void send_space(unsigned long len); ++static void send_pulse(unsigned long len); ++static void init_send(void); ++static void terminate_send(unsigned long len); ++static int init_hardware(void); ++static void drop_hardware(void); ++/* Initialisation */ ++static int init_port(void); ++static void drop_port(void); ++ ++ ++/* SECTION: Communication with user-space */ ++ ++static int lirc_open(struct inode *inode, struct file *file) ++{ ++ 
spin_lock(&dev_lock); ++ if (module_refcount(THIS_MODULE)) { ++ spin_unlock(&dev_lock); ++ return -EBUSY; ++ } ++ spin_unlock(&dev_lock); ++ return 0; ++} ++ ++ ++static int lirc_close(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++ ++static unsigned int lirc_poll(struct file *file, poll_table *wait) ++{ ++ poll_wait(file, &lirc_read_queue, wait); ++ if (rx_head != rx_tail) ++ return POLLIN | POLLRDNORM; ++ return 0; ++} ++ ++ ++static ssize_t lirc_read(struct file *file, char *buf, ++ size_t count, loff_t *ppos) ++{ ++ int n = 0; ++ int retval = 0; ++ ++ while (n < count) { ++ if (file->f_flags & O_NONBLOCK && rx_head == rx_tail) { ++ retval = -EAGAIN; ++ break; ++ } ++ retval = wait_event_interruptible(lirc_read_queue, ++ rx_head != rx_tail); ++ if (retval) ++ break; ++ ++ if (copy_to_user((void *) buf + n, (void *) (rx_buf + rx_head), ++ sizeof(int))) { ++ retval = -EFAULT; ++ break; ++ } ++ rx_head = (rx_head + 1) & (RBUF_LEN - 1); ++ n += sizeof(int); ++ } ++ if (n) ++ return n; ++ return retval; ++} ++ ++ ++static ssize_t lirc_write(struct file *file, const char *buf, ++ size_t n, loff_t *pos) ++{ ++ int i = 0; ++ ++ if (n % sizeof(int) || (n / sizeof(int)) > WBUF_LEN) ++ return -EINVAL; ++ if (copy_from_user(tx_buf, buf, n)) ++ return -EFAULT; ++ n /= sizeof(int); ++ init_send(); ++ while (1) { ++ if (i >= n) ++ break; ++ if (tx_buf[i]) ++ send_pulse(tx_buf[i]); ++ i++; ++ if (i >= n) ++ break; ++ if (tx_buf[i]) ++ send_space(tx_buf[i]); ++ i++; ++ } ++ terminate_send(tx_buf[i - 1]); ++ return n; ++} ++ ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg) ++{ ++ int retval = 0; ++ unsigned long value = 0; ++ unsigned int ivalue; ++ unsigned long hw_flags; ++ ++ if (cmd == LIRC_GET_FEATURES) ++ value = LIRC_CAN_SEND_PULSE | ++ LIRC_CAN_SET_SEND_CARRIER | ++ LIRC_CAN_REC_MODE2; ++ else if (cmd == LIRC_GET_SEND_MODE) ++ value = LIRC_MODE_PULSE; ++ else if (cmd == LIRC_GET_REC_MODE) ++ 
value = LIRC_MODE_MODE2; ++ ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ case LIRC_GET_SEND_MODE: ++ case LIRC_GET_REC_MODE: ++ retval = put_user(value, (unsigned long *) arg); ++ break; ++ ++ case LIRC_SET_SEND_MODE: ++ case LIRC_SET_REC_MODE: ++ retval = get_user(value, (unsigned long *) arg); ++ break; ++ ++ case LIRC_SET_SEND_CARRIER: ++ retval = get_user(ivalue, (unsigned int *) arg); ++ if (retval) ++ return retval; ++ ivalue /= 1000; ++ if (ivalue > IT87_CIR_FREQ_MAX || ++ ivalue < IT87_CIR_FREQ_MIN) ++ return -EINVAL; ++ ++ it87_freq = ivalue; ++ ++ spin_lock_irqsave(&hardware_lock, hw_flags); ++ outb(((inb(io + IT87_CIR_TCR2) & IT87_CIR_TCR2_TXMPW) | ++ (it87_freq - IT87_CIR_FREQ_MIN) << 3), ++ io + IT87_CIR_TCR2); ++ spin_unlock_irqrestore(&hardware_lock, hw_flags); ++ dprintk("demodulation frequency: %d kHz\n", it87_freq); ++ ++ break; ++ ++ default: ++ retval = -EINVAL; ++ } ++ ++ if (retval) ++ return retval; ++ ++ if (cmd == LIRC_SET_REC_MODE) { ++ if (value != LIRC_MODE_MODE2) ++ retval = -ENOSYS; ++ } else if (cmd == LIRC_SET_SEND_MODE) { ++ if (value != LIRC_MODE_PULSE) ++ retval = -ENOSYS; ++ } ++ return retval; ++} ++ ++static void add_read_queue(int flag, unsigned long val) ++{ ++ unsigned int new_rx_tail; ++ int newval; ++ ++ dprintk("add flag %d with val %lu\n", flag, val); ++ ++ newval = val & PULSE_MASK; ++ ++ /* ++ * statistically, pulses are ~TIME_CONST/2 too long. 
we could ++ * maybe make this more exact, but this is good enough ++ */ ++ if (flag) { ++ /* pulse */ ++ if (newval > TIME_CONST / 2) ++ newval -= TIME_CONST / 2; ++ else /* should not ever happen */ ++ newval = 1; ++ newval |= PULSE_BIT; ++ } else ++ newval += TIME_CONST / 2; ++ new_rx_tail = (rx_tail + 1) & (RBUF_LEN - 1); ++ if (new_rx_tail == rx_head) { ++ dprintk("Buffer overrun.\n"); ++ return; ++ } ++ rx_buf[rx_tail] = newval; ++ rx_tail = new_rx_tail; ++ wake_up_interruptible(&lirc_read_queue); ++} ++ ++ ++static struct file_operations lirc_fops = { ++ .owner = THIS_MODULE, ++ .read = lirc_read, ++ .write = lirc_write, ++ .poll = lirc_poll, ++ .ioctl = lirc_ioctl, ++ .open = lirc_open, ++ .release = lirc_close, ++}; ++ ++static int set_use_inc(void *data) ++{ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++} ++ ++static struct lirc_driver driver = { ++ .name = LIRC_DRIVER_NAME, ++ .minor = -1, ++ .code_length = 1, ++ .sample_rate = 0, ++ .data = NULL, ++ .add_to_buf = NULL, ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .fops = &lirc_fops, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++ ++#ifdef MODULE ++static int init_chrdev(void) ++{ ++ driver.minor = lirc_register_driver(&driver); ++ ++ if (driver.minor < 0) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++ ++static void drop_chrdev(void) ++{ ++ lirc_unregister_driver(driver.minor); ++} ++#endif ++ ++ ++/* SECTION: Hardware */ ++static long delta(struct timeval *tv1, struct timeval *tv2) ++{ ++ unsigned long deltv; ++ ++ deltv = tv2->tv_sec - tv1->tv_sec; ++ if (deltv > 15) ++ deltv = 0xFFFFFF; ++ else ++ deltv = deltv*1000000 + tv2->tv_usec - tv1->tv_usec; ++ return deltv; ++} ++ ++static void it87_timeout(unsigned long data) ++{ ++ unsigned long flags; ++ ++ /* avoid interference with interrupt */ ++ spin_lock_irqsave(&timer_lock, flags); ++ ++ if (digimatrix) { ++ /* We have timed out. 
Disable the RX mechanism. */ ++ ++ outb((inb(io + IT87_CIR_RCR) & ~IT87_CIR_RCR_RXEN) | ++ IT87_CIR_RCR_RXACT, io + IT87_CIR_RCR); ++ if (it87_RXEN_mask) ++ outb(inb(io + IT87_CIR_RCR) | IT87_CIR_RCR_RXEN, ++ io + IT87_CIR_RCR); ++ dprintk(" TIMEOUT\n"); ++ timer_enabled = 0; ++ ++ /* fifo clear */ ++ outb(inb(io + IT87_CIR_TCR1) | IT87_CIR_TCR1_FIFOCLR, ++ io+IT87_CIR_TCR1); ++ ++ } else { ++ /* ++ * if last received signal was a pulse, but receiving stopped ++ * within the 9 bit frame, we need to finish this pulse and ++ * simulate a signal change to from pulse to space. Otherwise ++ * upper layers will receive two sequences next time. ++ */ ++ ++ if (last_value) { ++ unsigned long pulse_end; ++ ++ /* determine 'virtual' pulse end: */ ++ pulse_end = delta(&last_tv, &last_intr_tv); ++ dprintk("timeout add %d for %lu usec\n", ++ last_value, pulse_end); ++ add_read_queue(last_value, pulse_end); ++ last_value = 0; ++ last_tv = last_intr_tv; ++ } ++ } ++ spin_unlock_irqrestore(&timer_lock, flags); ++} ++ ++static irqreturn_t it87_interrupt(int irq, void *dev_id) ++{ ++ unsigned char data; ++ struct timeval curr_tv; ++ static unsigned long deltv; ++ unsigned long deltintrtv; ++ unsigned long flags, hw_flags; ++ int iir, lsr; ++ int fifo = 0; ++ static char lastbit; ++ char bit; ++ ++ /* Bit duration in microseconds */ ++ const unsigned long bit_duration = 1000000ul / ++ (115200 / IT87_CIR_BAUDRATE_DIVISOR); ++ ++ ++ iir = inb(io + IT87_CIR_IIR); ++ ++ switch (iir & IT87_CIR_IIR_IID) { ++ case 0x4: ++ case 0x6: ++ lsr = inb(io + IT87_CIR_RSR) & (IT87_CIR_RSR_RXFTO | ++ IT87_CIR_RSR_RXFBC); ++ fifo = lsr & IT87_CIR_RSR_RXFBC; ++ dprintk("iir: 0x%x fifo: 0x%x\n", iir, lsr); ++ ++ /* avoid interference with timer */ ++ spin_lock_irqsave(&timer_lock, flags); ++ spin_lock_irqsave(&hardware_lock, hw_flags); ++ if (digimatrix) { ++ static unsigned long acc_pulse; ++ static unsigned long acc_space; ++ ++ do { ++ data = inb(io + IT87_CIR_DR); ++ data = ~data; ++ fifo--; ++ if 
(data != 0x00) { ++ if (timer_enabled) ++ del_timer(&timerlist); ++ /* ++ * start timer for end of ++ * sequence detection ++ */ ++ timerlist.expires = jiffies + ++ IT87_TIMEOUT; ++ add_timer(&timerlist); ++ timer_enabled = 1; ++ } ++ /* Loop through */ ++ for (bit = 0; bit < 8; ++bit) { ++ if ((data >> bit) & 1) { ++ ++acc_pulse; ++ if (lastbit == 0) { ++ add_read_queue(0, ++ acc_space * ++ bit_duration); ++ acc_space = 0; ++ } ++ } else { ++ ++acc_space; ++ if (lastbit == 1) { ++ add_read_queue(1, ++ acc_pulse * ++ bit_duration); ++ acc_pulse = 0; ++ } ++ } ++ lastbit = (data >> bit) & 1; ++ } ++ ++ } while (fifo != 0); ++ } else { /* Normal Operation */ ++ do { ++ del_timer(&timerlist); ++ data = inb(io + IT87_CIR_DR); ++ ++ dprintk("data=%02x\n", data); ++ do_gettimeofday(&curr_tv); ++ deltv = delta(&last_tv, &curr_tv); ++ deltintrtv = delta(&last_intr_tv, &curr_tv); ++ ++ dprintk("t %lu , d %d\n", ++ deltintrtv, (int)data); ++ ++ /* ++ * if nothing came in last 2 cycles, ++ * it was gap ++ */ ++ if (deltintrtv > TIME_CONST * 2) { ++ if (last_value) { ++ dprintk("GAP\n"); ++ ++ /* simulate signal change */ ++ add_read_queue(last_value, ++ deltv - ++ deltintrtv); ++ last_value = 0; ++ last_tv.tv_sec = ++ last_intr_tv.tv_sec; ++ last_tv.tv_usec = ++ last_intr_tv.tv_usec; ++ deltv = deltintrtv; ++ } ++ } ++ data = 1; ++ if (data ^ last_value) { ++ /* ++ * deltintrtv > 2*TIME_CONST, ++ * remember ? 
the other case is ++ * timeout ++ */ ++ add_read_queue(last_value, ++ deltv-TIME_CONST); ++ last_value = data; ++ last_tv = curr_tv; ++ if (last_tv.tv_usec >= TIME_CONST) ++ last_tv.tv_usec -= TIME_CONST; ++ else { ++ last_tv.tv_sec--; ++ last_tv.tv_usec += 1000000 - ++ TIME_CONST; ++ } ++ } ++ last_intr_tv = curr_tv; ++ if (data) { ++ /* ++ * start timer for end of ++ * sequence detection ++ */ ++ timerlist.expires = ++ jiffies + IT87_TIMEOUT; ++ add_timer(&timerlist); ++ } ++ outb((inb(io + IT87_CIR_RCR) & ++ ~IT87_CIR_RCR_RXEN) | ++ IT87_CIR_RCR_RXACT, ++ io + IT87_CIR_RCR); ++ if (it87_RXEN_mask) ++ outb(inb(io + IT87_CIR_RCR) | ++ IT87_CIR_RCR_RXEN, ++ io + IT87_CIR_RCR); ++ fifo--; ++ } while (fifo != 0); ++ } ++ spin_unlock_irqrestore(&hardware_lock, hw_flags); ++ spin_unlock_irqrestore(&timer_lock, flags); ++ ++ return IRQ_RETVAL(IRQ_HANDLED); ++ ++ default: ++ /* not our irq */ ++ dprintk("unknown IRQ (shouldn't happen) !!\n"); ++ return IRQ_RETVAL(IRQ_NONE); ++ } ++} ++ ++ ++static void send_it87(unsigned long len, unsigned long stime, ++ unsigned char send_byte, unsigned int count_bits) ++{ ++ long count = len / stime; ++ long time_left = 0; ++ static unsigned char byte_out; ++ unsigned long hw_flags; ++ ++ dprintk("%s: len=%ld, sb=%d\n", __func__, len, send_byte); ++ ++ time_left = (long)len - (long)count * (long)stime; ++ count += ((2 * time_left) / stime); ++ while (count) { ++ long i = 0; ++ for (i = 0; i < count_bits; i++) { ++ byte_out = (byte_out << 1) | (send_byte & 1); ++ it87_bits_in_byte_out++; ++ } ++ if (it87_bits_in_byte_out == 8) { ++ dprintk("out=0x%x, tsr_txfbc: 0x%x\n", ++ byte_out, ++ inb(io + IT87_CIR_TSR) & ++ IT87_CIR_TSR_TXFBC); ++ ++ while ((inb(io + IT87_CIR_TSR) & ++ IT87_CIR_TSR_TXFBC) >= IT87_CIR_FIFO_SIZE) ++ ; ++ ++ spin_lock_irqsave(&hardware_lock, hw_flags); ++ outb(byte_out, io + IT87_CIR_DR); ++ spin_unlock_irqrestore(&hardware_lock, hw_flags); ++ ++ it87_bits_in_byte_out = 0; ++ it87_send_counter++; ++ byte_out = 0; ++ 
} ++ count--; ++ } ++} ++ ++ ++/*TODO: maybe exchange space and pulse because it8705 only modulates 0-bits */ ++ ++static void send_space(unsigned long len) ++{ ++ send_it87(len, TIME_CONST, IT87_CIR_SPACE, IT87_CIR_BAUDRATE_DIVISOR); ++} ++ ++static void send_pulse(unsigned long len) ++{ ++ send_it87(len, TIME_CONST, IT87_CIR_PULSE, IT87_CIR_BAUDRATE_DIVISOR); ++} ++ ++ ++static void init_send() ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ /* RXEN=0: receiver disable */ ++ it87_RXEN_mask = 0; ++ outb(inb(io + IT87_CIR_RCR) & ~IT87_CIR_RCR_RXEN, ++ io + IT87_CIR_RCR); ++ spin_unlock_irqrestore(&hardware_lock, flags); ++ it87_bits_in_byte_out = 0; ++ it87_send_counter = 0; ++} ++ ++ ++static void terminate_send(unsigned long len) ++{ ++ unsigned long flags; ++ unsigned long last = 0; ++ ++ last = it87_send_counter; ++ /* make sure all necessary data has been sent */ ++ while (last == it87_send_counter) ++ send_space(len); ++ /* wait until all data sent */ ++ while ((inb(io + IT87_CIR_TSR) & IT87_CIR_TSR_TXFBC) != 0) ++ ; ++ /* then re-enable receiver */ ++ spin_lock_irqsave(&hardware_lock, flags); ++ it87_RXEN_mask = IT87_CIR_RCR_RXEN; ++ outb(inb(io + IT87_CIR_RCR) | IT87_CIR_RCR_RXEN, ++ io + IT87_CIR_RCR); ++ spin_unlock_irqrestore(&hardware_lock, flags); ++} ++ ++ ++static int init_hardware(void) ++{ ++ unsigned long flags; ++ unsigned char it87_rcr = 0; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ /* init cir-port */ ++ /* enable r/w-access to Baudrate-Register */ ++ outb(IT87_CIR_IER_BR, io + IT87_CIR_IER); ++ outb(IT87_CIR_BAUDRATE_DIVISOR % 0x100, io+IT87_CIR_BDLR); ++ outb(IT87_CIR_BAUDRATE_DIVISOR / 0x100, io+IT87_CIR_BDHR); ++ /* Baudrate Register off, define IRQs: Input only */ ++ if (digimatrix) { ++ outb(IT87_CIR_IER_IEC | IT87_CIR_IER_RFOIE, io + IT87_CIR_IER); ++ /* RX: HCFS=0, RXDCR = 001b (33,75..38,25 kHz), RXEN=1 */ ++ } else { ++ outb(IT87_CIR_IER_IEC | IT87_CIR_IER_RDAIE, io + IT87_CIR_IER); ++ /* RX: 
HCFS=0, RXDCR = 001b (35,6..40,3 kHz), RXEN=1 */ ++ } ++ it87_rcr = (IT87_CIR_RCR_RXEN & it87_RXEN_mask) | 0x1; ++ if (it87_enable_demodulator) ++ it87_rcr |= IT87_CIR_RCR_RXEND; ++ outb(it87_rcr, io + IT87_CIR_RCR); ++ if (digimatrix) { ++ /* Set FIFO depth to 1 byte, and disable TX */ ++ outb(inb(io + IT87_CIR_TCR1) | 0x00, ++ io + IT87_CIR_TCR1); ++ ++ /* ++ * TX: it87_freq (36kHz), 'reserved' sensitivity ++ * setting (0x00) ++ */ ++ outb(((it87_freq - IT87_CIR_FREQ_MIN) << 3) | 0x00, ++ io + IT87_CIR_TCR2); ++ } else { ++ /* TX: 38kHz, 13,3us (pulse-width) */ ++ outb(((it87_freq - IT87_CIR_FREQ_MIN) << 3) | 0x06, ++ io + IT87_CIR_TCR2); ++ } ++ spin_unlock_irqrestore(&hardware_lock, flags); ++ return 0; ++} ++ ++ ++static void drop_hardware(void) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ disable_irq(irq); ++ /* receiver disable */ ++ it87_RXEN_mask = 0; ++ outb(0x1, io + IT87_CIR_RCR); ++ /* turn off irqs */ ++ outb(0, io + IT87_CIR_IER); ++ /* fifo clear */ ++ outb(IT87_CIR_TCR1_FIFOCLR, io+IT87_CIR_TCR1); ++ /* reset */ ++ outb(IT87_CIR_IER_RESET, io+IT87_CIR_IER); ++ enable_irq(irq); ++ spin_unlock_irqrestore(&hardware_lock, flags); ++} ++ ++ ++static unsigned char it87_read(unsigned char port) ++{ ++ outb(port, IT87_ADRPORT); ++ return inb(IT87_DATAPORT); ++} ++ ++ ++static void it87_write(unsigned char port, unsigned char data) ++{ ++ outb(port, IT87_ADRPORT); ++ outb(data, IT87_DATAPORT); ++} ++ ++ ++/* SECTION: Initialisation */ ++ ++static int init_port(void) ++{ ++ unsigned long hw_flags; ++ int retval = 0; ++ ++ unsigned char init_bytes[4] = IT87_INIT; ++ unsigned char it87_chipid = 0; ++ unsigned char ldn = 0; ++ unsigned int it87_io = 0; ++ unsigned int it87_irq = 0; ++ ++ /* Enter MB PnP Mode */ ++ outb(init_bytes[0], IT87_ADRPORT); ++ outb(init_bytes[1], IT87_ADRPORT); ++ outb(init_bytes[2], IT87_ADRPORT); ++ outb(init_bytes[3], IT87_ADRPORT); ++ ++ /* 8712 or 8705 ? 
*/ ++ it87_chipid = it87_read(IT87_CHIP_ID1); ++ if (it87_chipid != 0x87) { ++ retval = -ENXIO; ++ return retval; ++ } ++ it87_chipid = it87_read(IT87_CHIP_ID2); ++ if ((it87_chipid != 0x05) && ++ (it87_chipid != 0x12) && ++ (it87_chipid != 0x18) && ++ (it87_chipid != 0x20)) { ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": no IT8704/05/12/18/20 found (claimed IT87%02x), " ++ "exiting..\n", it87_chipid); ++ retval = -ENXIO; ++ return retval; ++ } ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": found IT87%02x.\n", ++ it87_chipid); ++ ++ /* get I/O-Port and IRQ */ ++ if (it87_chipid == 0x12 || it87_chipid == 0x18) ++ ldn = IT8712_CIR_LDN; ++ else ++ ldn = IT8705_CIR_LDN; ++ it87_write(IT87_LDN, ldn); ++ ++ it87_io = it87_read(IT87_CIR_BASE_MSB) * 256 + ++ it87_read(IT87_CIR_BASE_LSB); ++ if (it87_io == 0) { ++ if (io == 0) ++ io = IT87_CIR_DEFAULT_IOBASE; ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": set default io 0x%x\n", ++ io); ++ it87_write(IT87_CIR_BASE_MSB, io / 0x100); ++ it87_write(IT87_CIR_BASE_LSB, io % 0x100); ++ } else ++ io = it87_io; ++ ++ it87_irq = it87_read(IT87_CIR_IRQ); ++ if (digimatrix || it87_irq == 0) { ++ if (irq == 0) ++ irq = IT87_CIR_DEFAULT_IRQ; ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": set default irq 0x%x\n", ++ irq); ++ it87_write(IT87_CIR_IRQ, irq); ++ } else ++ irq = it87_irq; ++ ++ spin_lock_irqsave(&hardware_lock, hw_flags); ++ /* reset */ ++ outb(IT87_CIR_IER_RESET, io+IT87_CIR_IER); ++ /* fifo clear */ ++ outb(IT87_CIR_TCR1_FIFOCLR | ++ /* IT87_CIR_TCR1_ILE | */ ++ IT87_CIR_TCR1_TXRLE | ++ IT87_CIR_TCR1_TXENDF, io+IT87_CIR_TCR1); ++ spin_unlock_irqrestore(&hardware_lock, hw_flags); ++ ++ /* get I/O port access and IRQ line */ ++ if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": i/o port 0x%.4x already in use.\n", io); ++ /* Leaving MB PnP Mode */ ++ it87_write(IT87_CFGCTRL, 0x2); ++ return -EBUSY; ++ } ++ ++ /* activate CIR-Device */ ++ it87_write(IT87_CIR_ACT, 0x1); ++ ++ /* Leaving MB PnP Mode 
*/ ++ it87_write(IT87_CFGCTRL, 0x2); ++ ++ retval = request_irq(irq, it87_interrupt, 0 /*IRQF_DISABLED*/, ++ LIRC_DRIVER_NAME, NULL); ++ if (retval < 0) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": IRQ %d already in use.\n", ++ irq); ++ release_region(io, 8); ++ return retval; ++ } ++ ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": I/O port 0x%.4x, IRQ %d.\n", io, irq); ++ ++ init_timer(&timerlist); ++ timerlist.function = it87_timeout; ++ timerlist.data = 0xabadcafe; ++ ++ return 0; ++} ++ ++ ++static void drop_port(void) ++{ ++#if 0 ++ unsigned char init_bytes[4] = IT87_INIT; ++ ++ /* Enter MB PnP Mode */ ++ outb(init_bytes[0], IT87_ADRPORT); ++ outb(init_bytes[1], IT87_ADRPORT); ++ outb(init_bytes[2], IT87_ADRPORT); ++ outb(init_bytes[3], IT87_ADRPORT); ++ ++ /* deactivate CIR-Device */ ++ it87_write(IT87_CIR_ACT, 0x0); ++ ++ /* Leaving MB PnP Mode */ ++ it87_write(IT87_CFGCTRL, 0x2); ++#endif ++ ++ del_timer_sync(&timerlist); ++ free_irq(irq, NULL); ++ release_region(io, 8); ++} ++ ++ ++static int init_lirc_it87(void) ++{ ++ int retval; ++ ++ init_waitqueue_head(&lirc_read_queue); ++ retval = init_port(); ++ if (retval < 0) ++ return retval; ++ init_hardware(); ++ printk(KERN_INFO LIRC_DRIVER_NAME ": Installed.\n"); ++ return 0; ++} ++ ++ ++static int __init lirc_it87_init(void) ++{ ++ int retval; ++ ++ retval = init_chrdev(); ++ if (retval < 0) ++ return retval; ++ retval = init_lirc_it87(); ++ if (retval) { ++ drop_chrdev(); ++ return retval; ++ } ++ return 0; ++} ++ ++ ++static void __exit lirc_it87_exit(void) ++{ ++ drop_hardware(); ++ drop_chrdev(); ++ drop_port(); ++ printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n"); ++} ++ ++module_init(lirc_it87_init); ++module_exit(lirc_it87_exit); ++ ++MODULE_DESCRIPTION("LIRC driver for ITE IT8704/05/12/18/20 CIR port"); ++MODULE_AUTHOR("Hans-Gunter Lutke Uphues"); ++MODULE_LICENSE("GPL"); ++ ++module_param(io, int, S_IRUGO); ++MODULE_PARM_DESC(io, "I/O base address (default: 0x310)"); ++ ++module_param(irq, int, 
S_IRUGO); ++#ifdef LIRC_IT87_DIGIMATRIX ++MODULE_PARM_DESC(irq, "Interrupt (1,3-12) (default: 9)"); ++#else ++MODULE_PARM_DESC(irq, "Interrupt (1,3-12) (default: 7)"); ++#endif ++ ++module_param(it87_enable_demodulator, bool, S_IRUGO); ++MODULE_PARM_DESC(it87_enable_demodulator, ++ "Receiver demodulator enable/disable (1/0), default: 0"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++module_param(digimatrix, bool, S_IRUGO | S_IWUSR); ++#ifdef LIRC_IT87_DIGIMATRIX ++MODULE_PARM_DESC(digimatrix, ++ "Asus Digimatrix it87 compat. enable/disable (1/0), default: 1"); ++#else ++MODULE_PARM_DESC(digimatrix, ++ "Asus Digimatrix it87 compat. enable/disable (1/0), default: 0"); ++#endif ++ ++ ++module_param(it87_freq, int, S_IRUGO); ++#ifdef LIRC_IT87_DIGIMATRIX ++MODULE_PARM_DESC(it87_freq, ++ "Carrier demodulator frequency (kHz), (default: 36)"); ++#else ++MODULE_PARM_DESC(it87_freq, ++ "Carrier demodulator frequency (kHz), (default: 38)"); ++#endif +diff --git a/drivers/input/lirc/lirc_it87.h b/drivers/input/lirc/lirc_it87.h +new file mode 100644 +index 0000000..cf021c8 +--- /dev/null ++++ b/drivers/input/lirc/lirc_it87.h +@@ -0,0 +1,116 @@ ++/* lirc_it87.h */ ++/* SECTION: Definitions */ ++ ++/********************************* ITE IT87xx ************************/ ++ ++/* based on the following documentation from ITE: ++ a) IT8712F Preliminary CIR Programming Guide V0.1 ++ b) IT8705F Simple LPC I/O Preliminary Specification V0.3 ++ c) IT8712F EC-LPC I/O Preliminary Specification V0.5 ++*/ ++ ++/* IT8712/05 Ports: */ ++#define IT87_ADRPORT 0x2e ++#define IT87_DATAPORT 0x2f ++#define IT87_INIT {0x87, 0x01, 0x55, 0x55} ++ ++/* alternate Ports: */ ++/* ++#define IT87_ADRPORT 0x4e ++#define IT87_DATAPORT 0x4f ++#define IT87_INIT {0x87, 0x01, 0x55, 0xaa} ++ */ ++ ++/* IT8712/05 Registers */ ++#define IT87_CFGCTRL 0x2 ++#define IT87_LDN 0x7 ++#define IT87_CHIP_ID1 0x20 ++#define IT87_CHIP_ID2 0x21 ++#define 
IT87_CFG_VERSION 0x22 ++#define IT87_SWSUSPEND 0x23 ++ ++#define IT8712_CIR_LDN 0xa ++#define IT8705_CIR_LDN 0x7 ++ ++/* CIR Configuration Registers: */ ++#define IT87_CIR_ACT 0x30 ++#define IT87_CIR_BASE_MSB 0x60 ++#define IT87_CIR_BASE_LSB 0x61 ++#define IT87_CIR_IRQ 0x70 ++#define IT87_CIR_CONFIG 0xf0 ++ ++/* List of IT87_CIR registers: offset to BaseAddr */ ++#define IT87_CIR_DR 0 ++#define IT87_CIR_IER 1 ++#define IT87_CIR_RCR 2 ++#define IT87_CIR_TCR1 3 ++#define IT87_CIR_TCR2 4 ++#define IT87_CIR_TSR 5 ++#define IT87_CIR_RSR 6 ++#define IT87_CIR_BDLR 5 ++#define IT87_CIR_BDHR 6 ++#define IT87_CIR_IIR 7 ++ ++/* Bit Definition */ ++/* IER: */ ++#define IT87_CIR_IER_TM_EN 0x80 ++#define IT87_CIR_IER_RESEVED 0x40 ++#define IT87_CIR_IER_RESET 0x20 ++#define IT87_CIR_IER_BR 0x10 ++#define IT87_CIR_IER_IEC 0x8 ++#define IT87_CIR_IER_RFOIE 0x4 ++#define IT87_CIR_IER_RDAIE 0x2 ++#define IT87_CIR_IER_TLDLIE 0x1 ++ ++/* RCR: */ ++#define IT87_CIR_RCR_RDWOS 0x80 ++#define IT87_CIR_RCR_HCFS 0x40 ++#define IT87_CIR_RCR_RXEN 0x20 ++#define IT87_CIR_RCR_RXEND 0x10 ++#define IT87_CIR_RCR_RXACT 0x8 ++#define IT87_CIR_RCR_RXDCR 0x7 ++ ++/* TCR1: */ ++#define IT87_CIR_TCR1_FIFOCLR 0x80 ++#define IT87_CIR_TCR1_ILE 0x40 ++#define IT87_CIR_TCR1_FIFOTL 0x30 ++#define IT87_CIR_TCR1_TXRLE 0x8 ++#define IT87_CIR_TCR1_TXENDF 0x4 ++#define IT87_CIR_TCR1_TXMPM 0x3 ++ ++/* TCR2: */ ++#define IT87_CIR_TCR2_CFQ 0xf8 ++#define IT87_CIR_TCR2_TXMPW 0x7 ++ ++/* TSR: */ ++#define IT87_CIR_TSR_RESERVED 0xc0 ++#define IT87_CIR_TSR_TXFBC 0x3f ++ ++/* RSR: */ ++#define IT87_CIR_RSR_RXFTO 0x80 ++#define IT87_CIR_RSR_RESERVED 0x40 ++#define IT87_CIR_RSR_RXFBC 0x3f ++ ++/* IIR: */ ++#define IT87_CIR_IIR_RESERVED 0xf8 ++#define IT87_CIR_IIR_IID 0x6 ++#define IT87_CIR_IIR_IIP 0x1 ++ ++/* TM: */ ++#define IT87_CIR_TM_IL_SEL 0x80 ++#define IT87_CIR_TM_RESERVED 0x40 ++#define IT87_CIR_TM_TM_REG 0x3f ++ ++#define IT87_CIR_FIFO_SIZE 32 ++ ++/* Baudratedivisor for IT87: power of 2: only 1,2,4 or 8) */ 
++#define IT87_CIR_BAUDRATE_DIVISOR 0x1 ++#define IT87_CIR_DEFAULT_IOBASE 0x310 ++#define IT87_CIR_DEFAULT_IRQ 0x7 ++#define IT87_CIR_SPACE 0x00 ++#define IT87_CIR_PULSE 0xff ++#define IT87_CIR_FREQ_MIN 27 ++#define IT87_CIR_FREQ_MAX 58 ++#define TIME_CONST (IT87_CIR_BAUDRATE_DIVISOR * 8000000ul / 115200ul) ++ ++/********************************* ITE IT87xx ************************/ +diff --git a/drivers/input/lirc/lirc_ite8709.c b/drivers/input/lirc/lirc_ite8709.c +new file mode 100644 +index 0000000..6210847 +--- /dev/null ++++ b/drivers/input/lirc/lirc_ite8709.c +@@ -0,0 +1,540 @@ ++/* ++ * LIRC driver for ITE8709 CIR port ++ * ++ * Copyright (C) 2008 Grégory Lardière ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 ++ * USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++#define LIRC_DRIVER_NAME "lirc_ite8709" ++ ++#define BUF_CHUNK_SIZE sizeof(int) ++#define BUF_SIZE (128*BUF_CHUNK_SIZE) ++ ++/* ++ * The ITE8709 device seems to be the combination of IT8512 superIO chip and ++ * a specific firmware running on the IT8512's embedded micro-controller. ++ * In addition of the embedded micro-controller, the IT8512 chip contains a ++ * CIR module and several other modules. 
A few modules are directly accessible ++ * by the host CPU, but most of them are only accessible by the ++ * micro-controller. The CIR module is only accessible by the micro-controller. ++ * The battery-backed SRAM module is accessible by the host CPU and the ++ * micro-controller. So one of the MC's firmware role is to act as a bridge ++ * between the host CPU and the CIR module. The firmware implements a kind of ++ * communication protocol using the SRAM module as a shared memory. The IT8512 ++ * specification is publicly available on ITE's web site, but the communication ++ * protocol is not, so it was reverse-engineered. ++ */ ++ ++/* ITE8709 Registers addresses and values (reverse-engineered) */ ++#define ITE8709_MODE 0x1a ++#define ITE8709_REG_ADR 0x1b ++#define ITE8709_REG_VAL 0x1c ++#define ITE8709_IIR 0x1e /* Interrupt identification register */ ++#define ITE8709_RFSR 0x1f /* Receiver FIFO status register */ ++#define ITE8709_FIFO_START 0x20 ++ ++#define ITE8709_MODE_READY 0X00 ++#define ITE8709_MODE_WRITE 0X01 ++#define ITE8709_MODE_READ 0X02 ++#define ITE8709_IIR_RDAI 0x02 /* Receiver data available interrupt */ ++#define ITE8709_IIR_RFOI 0x04 /* Receiver FIFO overrun interrupt */ ++#define ITE8709_RFSR_MASK 0x3f /* FIFO byte count mask */ ++ ++/* ++ * IT8512 CIR-module registers addresses and values ++ * (from IT8512 E/F specification v0.4.1) ++ */ ++#define IT8512_REG_MSTCR 0x01 /* Master control register */ ++#define IT8512_REG_IER 0x02 /* Interrupt enable register */ ++#define IT8512_REG_CFR 0x04 /* Carrier frequency register */ ++#define IT8512_REG_RCR 0x05 /* Receive control register */ ++#define IT8512_REG_BDLR 0x08 /* Baud rate divisor low byte register */ ++#define IT8512_REG_BDHR 0x09 /* Baud rate divisor high byte register */ ++ ++#define IT8512_MSTCR_RESET 0x01 /* Reset registers to default value */ ++#define IT8512_MSTCR_FIFOCLR 0x02 /* Clear FIFO */ ++#define IT8512_MSTCR_FIFOTL_7 0x04 /* FIFO threshold level : 7 */ ++#define 
IT8512_MSTCR_FIFOTL_25 0x0c /* FIFO threshold level : 25 */ ++#define IT8512_IER_RDAIE 0x02 /* Enable data interrupt request */ ++#define IT8512_IER_RFOIE 0x04 /* Enable FIFO overrun interrupt req */ ++#define IT8512_IER_IEC 0x80 /* Enable interrupt request */ ++#define IT8512_CFR_CF_36KHZ 0x09 /* Carrier freq : low speed, 36kHz */ ++#define IT8512_RCR_RXDCR_1 0x01 /* Demodulation carrier range : 1 */ ++#define IT8512_RCR_RXACT 0x08 /* Receiver active */ ++#define IT8512_RCR_RXEN 0x80 /* Receiver enable */ ++#define IT8512_BDR_6 6 /* Baud rate divisor : 6 */ ++ ++/* Actual values used by this driver */ ++#define CFG_FIFOTL IT8512_MSTCR_FIFOTL_25 ++#define CFG_CR_FREQ IT8512_CFR_CF_36KHZ ++#define CFG_DCR IT8512_RCR_RXDCR_1 ++#define CFG_BDR IT8512_BDR_6 ++#define CFG_TIMEOUT 100000 /* Rearm interrupt when a space is > 100 ms */ ++ ++static int debug; ++ ++struct ite8709_device { ++ int use_count; ++ int io; ++ int irq; ++ spinlock_t hardware_lock; ++ unsigned long long acc_pulse; ++ unsigned long long acc_space; ++ char lastbit; ++ struct timeval last_tv; ++ struct lirc_driver driver; ++ struct tasklet_struct tasklet; ++ char force_rearm; ++ char rearmed; ++ char device_busy; ++}; ++ ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \ ++ fmt, ## args); \ ++ } while (0) ++ ++ ++static unsigned char ite8709_read(struct ite8709_device *dev, ++ unsigned char port) ++{ ++ outb(port, dev->io); ++ return inb(dev->io+1); ++} ++ ++static void ite8709_write(struct ite8709_device *dev, unsigned char port, ++ unsigned char data) ++{ ++ outb(port, dev->io); ++ outb(data, dev->io+1); ++} ++ ++static void ite8709_wait_device(struct ite8709_device *dev) ++{ ++ int i = 0; ++ /* ++ * loop until device tells it's ready to continue ++ * iterations count is usually ~750 but can sometimes achieve 13000 ++ */ ++ for (i = 0; i < 15000; i++) { ++ udelay(2); ++ if (ite8709_read(dev, ITE8709_MODE) == ITE8709_MODE_READY) ++ break; ++ } ++} ++ ++static void ite8709_write_register(struct ite8709_device *dev, ++ unsigned char reg_adr, unsigned char reg_value) ++{ ++ ite8709_wait_device(dev); ++ ++ ite8709_write(dev, ITE8709_REG_VAL, reg_value); ++ ite8709_write(dev, ITE8709_REG_ADR, reg_adr); ++ ite8709_write(dev, ITE8709_MODE, ITE8709_MODE_WRITE); ++} ++ ++static void ite8709_init_hardware(struct ite8709_device *dev) ++{ ++ spin_lock_irq(&dev->hardware_lock); ++ dev->device_busy = 1; ++ spin_unlock_irq(&dev->hardware_lock); ++ ++ ite8709_write_register(dev, IT8512_REG_BDHR, (CFG_BDR >> 8) & 0xff); ++ ite8709_write_register(dev, IT8512_REG_BDLR, CFG_BDR & 0xff); ++ ite8709_write_register(dev, IT8512_REG_CFR, CFG_CR_FREQ); ++ ite8709_write_register(dev, IT8512_REG_IER, ++ IT8512_IER_IEC | IT8512_IER_RFOIE | IT8512_IER_RDAIE); ++ ite8709_write_register(dev, IT8512_REG_RCR, CFG_DCR); ++ ite8709_write_register(dev, IT8512_REG_MSTCR, ++ CFG_FIFOTL | IT8512_MSTCR_FIFOCLR); ++ ite8709_write_register(dev, IT8512_REG_RCR, ++ IT8512_RCR_RXEN | IT8512_RCR_RXACT | CFG_DCR); ++ ++ spin_lock_irq(&dev->hardware_lock); ++ dev->device_busy = 0; ++ spin_unlock_irq(&dev->hardware_lock); ++ ++ tasklet_enable(&dev->tasklet); ++} ++ ++static void ite8709_drop_hardware(struct 
ite8709_device *dev) ++{ ++ tasklet_disable(&dev->tasklet); ++ ++ spin_lock_irq(&dev->hardware_lock); ++ dev->device_busy = 1; ++ spin_unlock_irq(&dev->hardware_lock); ++ ++ ite8709_write_register(dev, IT8512_REG_RCR, 0); ++ ite8709_write_register(dev, IT8512_REG_MSTCR, ++ IT8512_MSTCR_RESET | IT8512_MSTCR_FIFOCLR); ++ ++ spin_lock_irq(&dev->hardware_lock); ++ dev->device_busy = 0; ++ spin_unlock_irq(&dev->hardware_lock); ++} ++ ++static int ite8709_set_use_inc(void *data) ++{ ++ struct ite8709_device *dev; ++ dev = data; ++ if (dev->use_count == 0) ++ ite8709_init_hardware(dev); ++ dev->use_count++; ++ return 0; ++} ++ ++static void ite8709_set_use_dec(void *data) ++{ ++ struct ite8709_device *dev; ++ dev = data; ++ dev->use_count--; ++ if (dev->use_count == 0) ++ ite8709_drop_hardware(dev); ++} ++ ++static void ite8709_add_read_queue(struct ite8709_device *dev, int flag, ++ unsigned long long val) ++{ ++ int value; ++ ++ dprintk("add a %llu usec %s\n", val, flag ? "pulse" : "space"); ++ ++ value = (val > PULSE_MASK) ? 
PULSE_MASK : val; ++ if (flag) ++ value |= PULSE_BIT; ++ ++ if (!lirc_buffer_full(dev->driver.rbuf)) { ++ lirc_buffer_write(dev->driver.rbuf, (void *) &value); ++ wake_up(&dev->driver.rbuf->wait_poll); ++ } ++} ++ ++static irqreturn_t ite8709_interrupt(int irq, void *dev_id) ++{ ++ unsigned char data; ++ int iir, rfsr, i; ++ int fifo = 0; ++ char bit; ++ struct timeval curr_tv; ++ ++ /* Bit duration in microseconds */ ++ const unsigned long bit_duration = 1000000ul / (115200 / CFG_BDR); ++ ++ struct ite8709_device *dev; ++ dev = dev_id; ++ ++ /* ++ * If device is busy, we simply discard data because we are in one of ++ * these two cases : shutting down or rearming the device, so this ++ * doesn't really matter and this avoids waiting too long in IRQ ctx ++ */ ++ spin_lock(&dev->hardware_lock); ++ if (dev->device_busy) { ++ spin_unlock(&dev->hardware_lock); ++ return IRQ_RETVAL(IRQ_HANDLED); ++ } ++ ++ iir = ite8709_read(dev, ITE8709_IIR); ++ ++ switch (iir) { ++ case ITE8709_IIR_RFOI: ++ dprintk("fifo overrun, scheduling forced rearm just in case\n"); ++ dev->force_rearm = 1; ++ tasklet_schedule(&dev->tasklet); ++ spin_unlock(&dev->hardware_lock); ++ return IRQ_RETVAL(IRQ_HANDLED); ++ ++ case ITE8709_IIR_RDAI: ++ rfsr = ite8709_read(dev, ITE8709_RFSR); ++ fifo = rfsr & ITE8709_RFSR_MASK; ++ if (fifo > 32) ++ fifo = 32; ++ dprintk("iir: 0x%x rfsr: 0x%x fifo: %d\n", iir, rfsr, fifo); ++ ++ if (dev->rearmed) { ++ do_gettimeofday(&curr_tv); ++ dev->acc_space += 1000000ull ++ * (curr_tv.tv_sec - dev->last_tv.tv_sec) ++ + (curr_tv.tv_usec - dev->last_tv.tv_usec); ++ dev->rearmed = 0; ++ } ++ for (i = 0; i < fifo; i++) { ++ data = ite8709_read(dev, i+ITE8709_FIFO_START); ++ data = ~data; ++ /* Loop through */ ++ for (bit = 0; bit < 8; ++bit) { ++ if ((data >> bit) & 1) { ++ dev->acc_pulse += bit_duration; ++ if (dev->lastbit == 0) { ++ ite8709_add_read_queue(dev, 0, ++ dev->acc_space); ++ dev->acc_space = 0; ++ } ++ } else { ++ dev->acc_space += bit_duration; ++ if 
(dev->lastbit == 1) { ++ ite8709_add_read_queue(dev, 1, ++ dev->acc_pulse); ++ dev->acc_pulse = 0; ++ } ++ } ++ dev->lastbit = (data >> bit) & 1; ++ } ++ } ++ ite8709_write(dev, ITE8709_RFSR, 0); ++ ++ if (dev->acc_space > CFG_TIMEOUT) { ++ dprintk("scheduling rearm IRQ\n"); ++ do_gettimeofday(&dev->last_tv); ++ dev->force_rearm = 0; ++ tasklet_schedule(&dev->tasklet); ++ } ++ ++ spin_unlock(&dev->hardware_lock); ++ return IRQ_RETVAL(IRQ_HANDLED); ++ ++ default: ++ /* not our irq */ ++ dprintk("unknown IRQ (shouldn't happen) !!\n"); ++ spin_unlock(&dev->hardware_lock); ++ return IRQ_RETVAL(IRQ_NONE); ++ } ++} ++ ++static void ite8709_rearm_irq(unsigned long data) ++{ ++ struct ite8709_device *dev; ++ unsigned long flags; ++ dev = (struct ite8709_device *) data; ++ ++ spin_lock_irqsave(&dev->hardware_lock, flags); ++ dev->device_busy = 1; ++ spin_unlock_irqrestore(&dev->hardware_lock, flags); ++ ++ if (dev->force_rearm || dev->acc_space > CFG_TIMEOUT) { ++ dprintk("rearming IRQ\n"); ++ ite8709_write_register(dev, IT8512_REG_RCR, ++ IT8512_RCR_RXACT | CFG_DCR); ++ ite8709_write_register(dev, IT8512_REG_MSTCR, ++ CFG_FIFOTL | IT8512_MSTCR_FIFOCLR); ++ ite8709_write_register(dev, IT8512_REG_RCR, ++ IT8512_RCR_RXEN | IT8512_RCR_RXACT | CFG_DCR); ++ if (!dev->force_rearm) ++ dev->rearmed = 1; ++ dev->force_rearm = 0; ++ } ++ ++ spin_lock_irqsave(&dev->hardware_lock, flags); ++ dev->device_busy = 0; ++ spin_unlock_irqrestore(&dev->hardware_lock, flags); ++} ++ ++static int ite8709_cleanup(struct ite8709_device *dev, int stage, int errno, ++ char *msg) ++{ ++ if (msg != NULL) ++ printk(KERN_ERR LIRC_DRIVER_NAME ": %s\n", msg); ++ ++ switch (stage) { ++ case 6: ++ if (dev->use_count > 0) ++ ite8709_drop_hardware(dev); ++ case 5: ++ free_irq(dev->irq, dev); ++ case 4: ++ release_region(dev->io, 2); ++ case 3: ++ lirc_unregister_driver(dev->driver.minor); ++ case 2: ++ lirc_buffer_free(dev->driver.rbuf); ++ kfree(dev->driver.rbuf); ++ case 1: ++ kfree(dev); ++ case 0: ++ ; ++ 
} ++ ++ return errno; ++} ++ ++static int __devinit ite8709_pnp_probe(struct pnp_dev *dev, ++ const struct pnp_device_id *dev_id) ++{ ++ struct lirc_driver *driver; ++ struct ite8709_device *ite8709_dev; ++ int ret; ++ ++ /* Check resources validity */ ++ if (!pnp_irq_valid(dev, 0)) ++ return ite8709_cleanup(NULL, 0, -ENODEV, "invalid IRQ"); ++ if (!pnp_port_valid(dev, 2)) ++ return ite8709_cleanup(NULL, 0, -ENODEV, "invalid IO port"); ++ ++ /* Allocate memory for device struct */ ++ ite8709_dev = kzalloc(sizeof(struct ite8709_device), GFP_KERNEL); ++ if (ite8709_dev == NULL) ++ return ite8709_cleanup(NULL, 0, -ENOMEM, "kzalloc failed"); ++ pnp_set_drvdata(dev, ite8709_dev); ++ ++ /* Initialize device struct */ ++ ite8709_dev->use_count = 0; ++ ite8709_dev->irq = pnp_irq(dev, 0); ++ ite8709_dev->io = pnp_port_start(dev, 2); ++ ite8709_dev->hardware_lock = __SPIN_LOCK_UNLOCKED( ++ ite8709_dev->hardware_lock); ++ ite8709_dev->acc_pulse = 0; ++ ite8709_dev->acc_space = 0; ++ ite8709_dev->lastbit = 0; ++ do_gettimeofday(&ite8709_dev->last_tv); ++ tasklet_init(&ite8709_dev->tasklet, ite8709_rearm_irq, ++ (long) ite8709_dev); ++ ite8709_dev->force_rearm = 0; ++ ite8709_dev->rearmed = 0; ++ ite8709_dev->device_busy = 0; ++ ++ /* Initialize driver struct */ ++ driver = &ite8709_dev->driver; ++ strcpy(driver->name, LIRC_DRIVER_NAME); ++ driver->minor = -1; ++ driver->code_length = sizeof(int) * 8; ++ driver->sample_rate = 0; ++ driver->features = LIRC_CAN_REC_MODE2; ++ driver->data = ite8709_dev; ++ driver->add_to_buf = NULL; ++ driver->set_use_inc = ite8709_set_use_inc; ++ driver->set_use_dec = ite8709_set_use_dec; ++ driver->dev = &dev->dev; ++ driver->owner = THIS_MODULE; ++ ++ /* Initialize LIRC buffer */ ++ driver->rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!driver->rbuf) ++ return ite8709_cleanup(ite8709_dev, 1, -ENOMEM, ++ "can't allocate lirc_buffer"); ++ if (lirc_buffer_init(driver->rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) ++ return 
ite8709_cleanup(ite8709_dev, 1, -ENOMEM, ++ "lirc_buffer_init() failed"); ++ ++ /* Register LIRC driver */ ++ ret = lirc_register_driver(driver); ++ if (ret < 0) ++ return ite8709_cleanup(ite8709_dev, 2, ret, ++ "lirc_register_driver() failed"); ++ ++ /* Reserve I/O port access */ ++ if (!request_region(ite8709_dev->io, 2, LIRC_DRIVER_NAME)) ++ return ite8709_cleanup(ite8709_dev, 3, -EBUSY, ++ "i/o port already in use"); ++ ++ /* Reserve IRQ line */ ++ ret = request_irq(ite8709_dev->irq, ite8709_interrupt, 0, ++ LIRC_DRIVER_NAME, ite8709_dev); ++ if (ret < 0) ++ return ite8709_cleanup(ite8709_dev, 4, ret, ++ "IRQ already in use"); ++ ++ /* Initialize hardware */ ++ ite8709_drop_hardware(ite8709_dev); /* Shutdown hw until first use */ ++ ++ printk(KERN_INFO LIRC_DRIVER_NAME ": device found : irq=%d io=0x%x\n", ++ ite8709_dev->irq, ite8709_dev->io); ++ ++ return 0; ++} ++ ++static void __devexit ite8709_pnp_remove(struct pnp_dev *dev) ++{ ++ struct ite8709_device *ite8709_dev; ++ ite8709_dev = pnp_get_drvdata(dev); ++ ++ ite8709_cleanup(ite8709_dev, 6, 0, NULL); ++ ++ printk(KERN_INFO LIRC_DRIVER_NAME ": device removed\n"); ++} ++ ++#ifdef CONFIG_PM ++static int ite8709_pnp_suspend(struct pnp_dev *dev, pm_message_t state) ++{ ++ struct ite8709_device *ite8709_dev; ++ ite8709_dev = pnp_get_drvdata(dev); ++ ++ if (ite8709_dev->use_count > 0) ++ ite8709_drop_hardware(ite8709_dev); ++ ++ return 0; ++} ++ ++static int ite8709_pnp_resume(struct pnp_dev *dev) ++{ ++ struct ite8709_device *ite8709_dev; ++ ite8709_dev = pnp_get_drvdata(dev); ++ ++ if (ite8709_dev->use_count > 0) ++ ite8709_init_hardware(ite8709_dev); ++ ++ return 0; ++} ++#else ++#define ite8709_pnp_suspend NULL ++#define ite8709_pnp_resume NULL ++#endif ++ ++static const struct pnp_device_id pnp_dev_table[] = { ++ {"ITE8709", 0}, ++ {} ++}; ++ ++MODULE_DEVICE_TABLE(pnp, pnp_dev_table); ++ ++static struct pnp_driver ite8709_pnp_driver = { ++ .name = LIRC_DRIVER_NAME, ++ .probe = ite8709_pnp_probe, ++ .remove 
= __devexit_p(ite8709_pnp_remove), ++ .suspend = ite8709_pnp_suspend, ++ .resume = ite8709_pnp_resume, ++ .id_table = pnp_dev_table, ++}; ++ ++int init_module(void) ++{ ++ return pnp_register_driver(&ite8709_pnp_driver); ++} ++ ++void cleanup_module(void) ++{ ++ pnp_unregister_driver(&ite8709_pnp_driver); ++} ++ ++MODULE_DESCRIPTION("LIRC driver for ITE8709 CIR port"); ++MODULE_AUTHOR("Grégory Lardière"); ++MODULE_LICENSE("GPL"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); +diff --git a/drivers/input/lirc/lirc_mceusb.c b/drivers/input/lirc/lirc_mceusb.c +new file mode 100644 +index 0000000..8b404e2 +--- /dev/null ++++ b/drivers/input/lirc/lirc_mceusb.c +@@ -0,0 +1,1222 @@ ++/* ++ * LIRC driver for Windows Media Center Edition USB Infrared Transceivers ++ * ++ * (C) by Martin A. Blatter ++ * ++ * Transmitter support and reception code cleanup. ++ * (C) by Daniel Melander ++ * ++ * Original lirc_mceusb driver for 1st-gen device: ++ * Copyright (c) 2003-2004 Dan Conti ++ * ++ * Original lirc_mceusb driver deprecated in favor of this driver, which ++ * supports the 1st-gen device now too. Transmit and receive support for ++ * the 1st-gen device added June-September 2009, ++ * by Jarod Wilson and Patrick Calhoun ++ * ++ * Derived from ATI USB driver by Paul Miller and the original ++ * MCE USB driver by Dan Conti (and now including chunks of the latter ++ * relevant to the 1st-gen device initialization) ++ * ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++#define DRIVER_VERSION "1.90" ++#define DRIVER_AUTHOR "Daniel Melander , " \ ++ "Martin Blatter , " \ ++ "Dan Conti " ++#define DRIVER_DESC "Windows Media Center Edition USB IR Transceiver " \ ++ "driver for LIRC" ++#define DRIVER_NAME "lirc_mceusb" ++ ++#define USB_BUFLEN 32 /* USB reception buffer length */ ++#define LIRCBUF_SIZE 256 /* LIRC work buffer length */ ++ ++/* MCE constants */ ++#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */ ++#define MCE_TIME_UNIT 50 /* Approx 50us resolution */ ++#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */ ++#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */ ++#define MCE_PACKET_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */ ++#define MCE_CONTROL_HEADER 0x9F /* MCE status header */ ++#define MCE_TX_HEADER_LENGTH 3 /* # of bytes in the initializing tx header */ ++#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? 
*/ ++#define MCE_DEFAULT_TX_MASK 0x03 /* Val opts: TX1=0x01, TX2=0x02, ALL=0x03 */ ++#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */ ++#define MCE_PULSE_MASK 0x7F /* Pulse mask */ ++#define MCE_MAX_PULSE_LENGTH 0x7F /* Longest transmittable pulse symbol */ ++#define MCE_PACKET_LENGTH_MASK 0x7F /* Pulse mask */ ++ ++ ++/* module parameters */ ++#ifdef CONFIG_USB_DEBUG ++static int debug = 1; ++#else ++static int debug; ++#endif ++ ++/* general constants */ ++#define SEND_FLAG_IN_PROGRESS 1 ++#define SEND_FLAG_COMPLETE 2 ++#define RECV_FLAG_IN_PROGRESS 3 ++#define RECV_FLAG_COMPLETE 4 ++ ++#define MCEUSB_INBOUND 1 ++#define MCEUSB_OUTBOUND 2 ++ ++#define VENDOR_PHILIPS 0x0471 ++#define VENDOR_SMK 0x0609 ++#define VENDOR_TATUNG 0x1460 ++#define VENDOR_GATEWAY 0x107b ++#define VENDOR_SHUTTLE 0x1308 ++#define VENDOR_SHUTTLE2 0x051c ++#define VENDOR_MITSUMI 0x03ee ++#define VENDOR_TOPSEED 0x1784 ++#define VENDOR_RICAVISION 0x179d ++#define VENDOR_ITRON 0x195d ++#define VENDOR_FIC 0x1509 ++#define VENDOR_LG 0x043e ++#define VENDOR_MICROSOFT 0x045e ++#define VENDOR_FORMOSA 0x147a ++#define VENDOR_FINTEK 0x1934 ++#define VENDOR_PINNACLE 0x2304 ++#define VENDOR_ECS 0x1019 ++#define VENDOR_WISTRON 0x0fb8 ++#define VENDOR_COMPRO 0x185b ++#define VENDOR_NORTHSTAR 0x04eb ++#define VENDOR_REALTEK 0x0bda ++#define VENDOR_TIVO 0x105a ++ ++static struct usb_device_id mceusb_dev_table[] = { ++ /* Original Microsoft MCE IR Transceiver (often HP-branded) */ ++ { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) }, ++ /* Philips Infrared Transceiver - Sahara branded */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x0608) }, ++ /* Philips Infrared Transceiver - HP branded */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x060c) }, ++ /* Philips SRM5100 */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x060d) }, ++ /* Philips Infrared Transceiver - Omaura */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x060f) }, ++ /* Philips Infrared Transceiver - Spinel plus */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x0613) }, ++ /* Philips eHome 
Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_PHILIPS, 0x0815) }, ++ /* Realtek MCE IR Receiver */ ++ { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, ++ /* SMK/Toshiba G83C0004D410 */ ++ { USB_DEVICE(VENDOR_SMK, 0x031d) }, ++ /* SMK eHome Infrared Transceiver (Sony VAIO) */ ++ { USB_DEVICE(VENDOR_SMK, 0x0322) }, ++ /* bundled with Hauppauge PVR-150 */ ++ { USB_DEVICE(VENDOR_SMK, 0x0334) }, ++ /* SMK eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_SMK, 0x0338) }, ++ /* Tatung eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TATUNG, 0x9150) }, ++ /* Shuttle eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_SHUTTLE, 0xc001) }, ++ /* Shuttle eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_SHUTTLE2, 0xc001) }, ++ /* Gateway eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_GATEWAY, 0x3009) }, ++ /* Mitsumi */ ++ { USB_DEVICE(VENDOR_MITSUMI, 0x2501) }, ++ /* Topseed eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0001) }, ++ /* Topseed HP eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0006) }, ++ /* Topseed eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0007) }, ++ /* Topseed eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0008) }, ++ /* Topseed eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x000a) }, ++ /* Ricavision internal Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_RICAVISION, 0x0010) }, ++ /* Itron ione Libra Q-11 */ ++ { USB_DEVICE(VENDOR_ITRON, 0x7002) }, ++ /* FIC eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_FIC, 0x9242) }, ++ /* LG eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_LG, 0x9803) }, ++ /* Microsoft MCE Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_MICROSOFT, 0x00a0) }, ++ /* Formosa eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe015) }, ++ /* Formosa21 / eHome Infrared Receiver */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe016) }, ++ /* Formosa aim / Trust MCE Infrared Receiver */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe017) }, ++ /* 
Formosa Industrial Computing / Beanbag Emulation Device */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe018) }, ++ /* Formosa21 / eHome Infrared Receiver */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe03a) }, ++ /* Formosa Industrial Computing AIM IR605/A */ ++ { USB_DEVICE(VENDOR_FORMOSA, 0xe03c) }, ++ /* Fintek eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_FINTEK, 0x0602) }, ++ /* Fintek eHome Infrared Transceiver (in the AOpen MP45) */ ++ { USB_DEVICE(VENDOR_FINTEK, 0x0702) }, ++ /* Pinnacle Remote Kit */ ++ { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, ++ /* Elitegroup Computer Systems IR */ ++ { USB_DEVICE(VENDOR_ECS, 0x0f38) }, ++ /* Wistron Corp. eHome Infrared Receiver */ ++ { USB_DEVICE(VENDOR_WISTRON, 0x0002) }, ++ /* Compro K100 */ ++ { USB_DEVICE(VENDOR_COMPRO, 0x3020) }, ++ /* Compro K100 v2 */ ++ { USB_DEVICE(VENDOR_COMPRO, 0x3082) }, ++ /* Northstar Systems, Inc. eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) }, ++ /* TiVo PC IR Receiver */ ++ { USB_DEVICE(VENDOR_TIVO, 0x2000) }, ++ /* Terminating entry */ ++ { } ++}; ++ ++static struct usb_device_id pinnacle_list[] = { ++ { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, ++ {} ++}; ++ ++static struct usb_device_id microsoft_gen1_list[] = { ++ { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) }, ++ {} ++}; ++ ++static struct usb_device_id transmitter_mask_list[] = { ++ { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) }, ++ { USB_DEVICE(VENDOR_PHILIPS, 0x060c) }, ++ { USB_DEVICE(VENDOR_SMK, 0x031d) }, ++ { USB_DEVICE(VENDOR_SMK, 0x0322) }, ++ { USB_DEVICE(VENDOR_SMK, 0x0334) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0001) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0006) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0007) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0008) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x000a) }, ++ { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, ++ {} ++}; ++ ++/* data structure for each usb transceiver */ ++struct mceusb_dev { ++ ++ /* usb */ ++ struct usb_device *usbdev; ++ struct urb *urb_in; ++ int devnum; ++ struct 
usb_endpoint_descriptor *usb_ep_in; ++ struct usb_endpoint_descriptor *usb_ep_out; ++ ++ /* buffers and dma */ ++ unsigned char *buf_in; ++ unsigned int len_in; ++ dma_addr_t dma_in; ++ dma_addr_t dma_out; ++ unsigned int overflow_len; ++ ++ /* lirc */ ++ struct lirc_driver *d; ++ int lircdata; ++ unsigned char is_pulse; ++ struct { ++ u32 connected:1; ++ u32 pinnacle:1; ++ u32 transmitter_mask_inverted:1; ++ u32 microsoft_gen1:1; ++ u32 reserved:28; ++ } flags; ++ ++ unsigned char transmitter_mask; ++ unsigned int carrier_freq; ++ ++ /* handle sending (init strings) */ ++ int send_flags; ++ wait_queue_head_t wait_out; ++ ++ struct mutex dev_lock; ++}; ++ ++/* init strings */ ++static char init1[] = {0x00, 0xff, 0xaa, 0xff, 0x0b}; ++static char init2[] = {0xff, 0x18}; ++ ++static char pin_init1[] = { 0x9f, 0x07}; ++static char pin_init2[] = { 0x9f, 0x13}; ++static char pin_init3[] = { 0x9f, 0x0d}; ++ ++static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, int len) ++{ ++ char codes[USB_BUFLEN * 3 + 1]; ++ int i; ++ ++ if (len <= 0) ++ return; ++ ++ if (ir->flags.microsoft_gen1 && len <= 2) ++ return; ++ ++ for (i = 0; i < len && i < USB_BUFLEN; i++) ++ snprintf(codes + i * 3, 4, "%02x ", buf[i] & 0xFF); ++ ++ dev_info(ir->d->dev, "data received %s (length=%d)\n", codes, len); ++} ++ ++static void usb_async_callback(struct urb *urb, struct pt_regs *regs) ++{ ++ struct mceusb_dev *ir; ++ int len; ++ ++ if (!urb) ++ return; ++ ++ ir = urb->context; ++ if (ir) { ++ len = urb->actual_length; ++ ++ dev_dbg(ir->d->dev, "callback called (status=%d len=%d)\n", ++ urb->status, len); ++ ++ if (debug) ++ mceusb_dev_printdata(ir, urb->transfer_buffer, len); ++ } ++ ++} ++ ++/* request incoming or send outgoing usb packet - used to initialize remote */ ++static void request_packet_async(struct mceusb_dev *ir, ++ struct usb_endpoint_descriptor *ep, ++ unsigned char *data, int size, int urb_type) ++{ ++ int res; ++ struct urb *async_urb; ++ unsigned char *async_buf; 
++ ++ if (urb_type) { ++ async_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (unlikely(!async_urb)) ++ return; ++ ++ async_buf = kzalloc(size, GFP_KERNEL); ++ if (!async_buf) { ++ usb_free_urb(async_urb); ++ return; ++ } ++ ++ if (urb_type == MCEUSB_OUTBOUND) { ++ /* outbound data */ ++ usb_fill_int_urb(async_urb, ir->usbdev, ++ usb_sndintpipe(ir->usbdev, ++ ep->bEndpointAddress), ++ async_buf, size, ++ (usb_complete_t) usb_async_callback, ++ ir, ep->bInterval); ++ memcpy(async_buf, data, size); ++ } else { ++ /* inbound data */ ++ usb_fill_int_urb(async_urb, ir->usbdev, ++ usb_rcvintpipe(ir->usbdev, ++ ep->bEndpointAddress), ++ async_buf, size, ++ (usb_complete_t) usb_async_callback, ++ ir, ep->bInterval); ++ } ++ ++ } else { ++ /* standard request */ ++ async_urb = ir->urb_in; ++ ir->send_flags = RECV_FLAG_IN_PROGRESS; ++ } ++ ++ dev_dbg(ir->d->dev, "receive request called (size=%#x)\n", size); ++ ++ async_urb->transfer_buffer_length = size; ++ async_urb->dev = ir->usbdev; ++ ++ res = usb_submit_urb(async_urb, GFP_ATOMIC); ++ if (res) { ++ dev_dbg(ir->d->dev, "receive request FAILED! 
(res=%d)\n", res); ++ return; ++ } ++ dev_dbg(ir->d->dev, "receive request complete (res=%d)\n", res); ++} ++ ++static int unregister_from_lirc(struct mceusb_dev *ir) ++{ ++ struct lirc_driver *d = ir->d; ++ int devnum; ++ int rtn; ++ ++ devnum = ir->devnum; ++ dev_dbg(ir->d->dev, "unregister from lirc called\n"); ++ ++ rtn = lirc_unregister_driver(d->minor); ++ if (rtn > 0) { ++ dev_info(ir->d->dev, "error in lirc_unregister minor: %d\n" ++ "Trying again...\n", d->minor); ++ if (rtn == -EBUSY) { ++ dev_info(ir->d->dev, "device is opened, will " ++ "unregister on close\n"); ++ return -EAGAIN; ++ } ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(HZ); ++ ++ rtn = lirc_unregister_driver(d->minor); ++ if (rtn > 0) ++ dev_info(ir->d->dev, "lirc_unregister failed\n"); ++ } ++ ++ if (rtn) { ++ dev_info(ir->d->dev, "didn't free resources\n"); ++ return -EAGAIN; ++ } ++ ++ dev_info(ir->d->dev, "usb remote disconnected\n"); ++ ++ lirc_buffer_free(d->rbuf); ++ kfree(d->rbuf); ++ kfree(d); ++ kfree(ir); ++ return 0; ++} ++ ++static int mceusb_ir_open(void *data) ++{ ++ struct mceusb_dev *ir = data; ++ ++ if (!ir) { ++ printk(KERN_WARNING DRIVER_NAME ++ "[?]: %s called with no context\n", __func__); ++ return -EIO; ++ } ++ ++ dev_dbg(ir->d->dev, "mceusb IR device opened\n"); ++ ++ if (!ir->flags.connected) { ++ if (!ir->usbdev) ++ return -ENOENT; ++ ir->flags.connected = 1; ++ } ++ ++ return 0; ++} ++ ++static void mceusb_ir_close(void *data) ++{ ++ struct mceusb_dev *ir = data; ++ ++ if (!ir) { ++ printk(KERN_WARNING DRIVER_NAME ++ "[?]: %s called with no context\n", __func__); ++ return; ++ } ++ ++ dev_dbg(ir->d->dev, "mceusb IR device closed\n"); ++ ++ if (ir->flags.connected) { ++ mutex_lock(&ir->dev_lock); ++ ir->flags.connected = 0; ++ mutex_unlock(&ir->dev_lock); ++ } ++} ++ ++static void send_packet_to_lirc(struct mceusb_dev *ir) ++{ ++ if (ir->lircdata) { ++ lirc_buffer_write(ir->d->rbuf, ++ (unsigned char *) &ir->lircdata); ++ 
wake_up(&ir->d->rbuf->wait_poll); ++ ir->lircdata = 0; ++ } ++} ++ ++static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) ++{ ++ int i, j; ++ int packet_len = 0; ++ int start_index = 0; ++ ++ /* skip meaningless 0xb1 0x60 header bytes on orig receiver */ ++ if (ir->flags.microsoft_gen1) ++ start_index = 2; ++ ++ /* this should only trigger w/the 1st-gen mce receiver */ ++ for (i = start_index; i < (start_index + ir->overflow_len) && ++ i < buf_len; i++) { ++ /* rising/falling flank */ ++ if (ir->is_pulse != (ir->buf_in[i] & MCE_PULSE_BIT)) { ++ send_packet_to_lirc(ir); ++ ir->is_pulse = ir->buf_in[i] & MCE_PULSE_BIT; ++ } ++ ++ /* accumulate mce pulse/space values */ ++ ir->lircdata += (ir->buf_in[i] & MCE_PULSE_MASK) * ++ MCE_TIME_UNIT; ++ ir->lircdata |= (ir->is_pulse ? PULSE_BIT : 0); ++ } ++ start_index += ir->overflow_len; ++ ir->overflow_len = 0; ++ ++ for (i = start_index; i < buf_len; i++) { ++ /* decode mce packets of the form (84),AA,BB,CC,DD */ ++ ++ /* data headers */ ++ if (ir->buf_in[i] >= 0x80 && ir->buf_in[i] <= 0x9e) { ++ /* decode packet data */ ++ packet_len = ir->buf_in[i] & MCE_PACKET_LENGTH_MASK; ++ ir->overflow_len = i + 1 + packet_len - buf_len; ++ for (j = 1; j <= packet_len && (i + j < buf_len); j++) { ++ /* rising/falling flank */ ++ if (ir->is_pulse != ++ (ir->buf_in[i + j] & MCE_PULSE_BIT)) { ++ send_packet_to_lirc(ir); ++ ir->is_pulse = ++ ir->buf_in[i + j] & ++ MCE_PULSE_BIT; ++ } ++ ++ /* accumulate mce pulse/space values */ ++ ir->lircdata += ++ (ir->buf_in[i + j] & MCE_PULSE_MASK) * ++ MCE_TIME_UNIT; ++ ir->lircdata |= (ir->is_pulse ? 
PULSE_BIT : 0); ++ } ++ ++ i += packet_len; ++ ++ /* status header (0x9F) */ ++ } else if (ir->buf_in[i] == MCE_CONTROL_HEADER) { ++ /* ++ * A transmission containing one or more consecutive ir ++ * commands always ends with a GAP of 100ms followed by ++ * the sequence 0x9F 0x01 0x01 0x9F 0x15 0x00 0x00 0x80 ++ */ ++ ++#if 0 ++ Uncomment this if the last 100ms "infinity"-space should be transmitted ++ to lirc directly instead of at the beginning of the next transmission. ++ Changes pulse/space order. ++ ++ if (++i < buf_len && ir->buf_in[i] == 0x01) ++ send_packet_to_lirc(ir); ++ ++#endif ++ ++ /* end decode loop */ ++ dev_dbg(ir->d->dev, "[%d] %s: found control header\n", ++ ir->devnum, __func__); ++ ir->overflow_len = 0; ++ break; ++ } else { ++ dev_dbg(ir->d->dev, "[%d] %s: stray packet?\n", ++ ir->devnum, __func__); ++ ir->overflow_len = 0; ++ } ++ } ++ ++ return; ++} ++ ++static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs) ++{ ++ struct mceusb_dev *ir; ++ int buf_len; ++ ++ if (!urb) ++ return; ++ ++ ir = urb->context; ++ if (!ir) { ++ usb_unlink_urb(urb); ++ return; ++ } ++ ++ buf_len = urb->actual_length; ++ ++ if (debug) ++ mceusb_dev_printdata(ir, urb->transfer_buffer, buf_len); ++ ++ if (ir->send_flags == RECV_FLAG_IN_PROGRESS) { ++ ir->send_flags = SEND_FLAG_COMPLETE; ++ dev_dbg(ir->d->dev, "setup answer received %d bytes\n", ++ buf_len); ++ } ++ ++ switch (urb->status) { ++ /* success */ ++ case 0: ++ mceusb_process_ir_data(ir, buf_len); ++ break; ++ ++ case -ECONNRESET: ++ case -ENOENT: ++ case -ESHUTDOWN: ++ usb_unlink_urb(urb); ++ return; ++ ++ case -EPIPE: ++ default: ++ break; ++ } ++ ++ usb_submit_urb(urb, GFP_ATOMIC); ++} ++ ++ ++static ssize_t mceusb_transmit_ir(struct file *file, const char *buf, ++ size_t n, loff_t *ppos) ++{ ++ int i, count = 0, cmdcount = 0; ++ struct mceusb_dev *ir = NULL; ++ int wbuf[LIRCBUF_SIZE]; /* Workbuffer with values from lirc */ ++ unsigned char cmdbuf[MCE_CMDBUF_SIZE]; /* MCE command buffer */ ++ 
unsigned long signal_duration = 0; /* Singnal length in us */ ++ struct timeval start_time, end_time; ++ ++ do_gettimeofday(&start_time); ++ ++ /* Retrieve lirc_driver data for the device */ ++ ir = lirc_get_pdata(file); ++ if (!ir || !ir->usb_ep_out) ++ return -EFAULT; ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ count = n / sizeof(int); ++ ++ /* Check if command is within limits */ ++ if (count > LIRCBUF_SIZE || count%2 == 0) ++ return -EINVAL; ++ if (copy_from_user(wbuf, buf, n)) ++ return -EFAULT; ++ ++ /* MCE tx init header */ ++ cmdbuf[cmdcount++] = MCE_CONTROL_HEADER; ++ cmdbuf[cmdcount++] = 0x08; ++ cmdbuf[cmdcount++] = ir->transmitter_mask; ++ ++ /* Generate mce packet data */ ++ for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) { ++ signal_duration += wbuf[i]; ++ wbuf[i] = wbuf[i] / MCE_TIME_UNIT; ++ ++ do { /* loop to support long pulses/spaces > 127*50us=6.35ms */ ++ ++ /* Insert mce packet header every 4th entry */ ++ if ((cmdcount < MCE_CMDBUF_SIZE) && ++ (cmdcount - MCE_TX_HEADER_LENGTH) % ++ MCE_CODE_LENGTH == 0) ++ cmdbuf[cmdcount++] = MCE_PACKET_HEADER; ++ ++ /* Insert mce packet data */ ++ if (cmdcount < MCE_CMDBUF_SIZE) ++ cmdbuf[cmdcount++] = ++ (wbuf[i] < MCE_PULSE_BIT ? ++ wbuf[i] : MCE_MAX_PULSE_LENGTH) | ++ (i & 1 ? 
0x00 : MCE_PULSE_BIT); ++ else ++ return -EINVAL; ++ } while ((wbuf[i] > MCE_MAX_PULSE_LENGTH) && ++ (wbuf[i] -= MCE_MAX_PULSE_LENGTH)); ++ } ++ ++ /* Fix packet length in last header */ ++ cmdbuf[cmdcount - (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH] = ++ 0x80 + (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH - 1; ++ ++ /* Check if we have room for the empty packet at the end */ ++ if (cmdcount >= MCE_CMDBUF_SIZE) ++ return -EINVAL; ++ ++ /* All mce commands end with an empty packet (0x80) */ ++ cmdbuf[cmdcount++] = 0x80; ++ ++ /* Transmit the command to the mce device */ ++ request_packet_async(ir, ir->usb_ep_out, cmdbuf, ++ cmdcount, MCEUSB_OUTBOUND); ++ ++ /* ++ * The lircd gap calculation expects the write function to ++ * wait the time it takes for the ircommand to be sent before ++ * it returns. ++ */ ++ do_gettimeofday(&end_time); ++ signal_duration -= (end_time.tv_usec - start_time.tv_usec) + ++ (end_time.tv_sec - start_time.tv_sec) * 1000000; ++ ++ /* delay with the closest number of ticks */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(usecs_to_jiffies(signal_duration)); ++ ++ return n; ++} ++ ++static void set_transmitter_mask(struct mceusb_dev *ir, unsigned int mask) ++{ ++ if (ir->flags.transmitter_mask_inverted) ++ ir->transmitter_mask = (mask != 0x03 ? 
mask ^ 0x03 : mask) << 1; ++ else ++ ir->transmitter_mask = mask; ++} ++ ++ ++/* Sets the send carrier frequency */ ++static int set_send_carrier(struct mceusb_dev *ir, int carrier) ++{ ++ int clk = 10000000; ++ int prescaler = 0, divisor = 0; ++ unsigned char cmdbuf[] = { 0x9F, 0x06, 0x01, 0x80 }; ++ ++ /* Carrier is changed */ ++ if (ir->carrier_freq != carrier) { ++ ++ if (carrier <= 0) { ++ ir->carrier_freq = carrier; ++ dev_dbg(ir->d->dev, "SET_CARRIER disabling carrier " ++ "modulation\n"); ++ request_packet_async(ir, ir->usb_ep_out, ++ cmdbuf, sizeof(cmdbuf), ++ MCEUSB_OUTBOUND); ++ return carrier; ++ } ++ ++ for (prescaler = 0; prescaler < 4; ++prescaler) { ++ divisor = (clk >> (2 * prescaler)) / carrier; ++ if (divisor <= 0xFF) { ++ ir->carrier_freq = carrier; ++ cmdbuf[2] = prescaler; ++ cmdbuf[3] = divisor; ++ dev_dbg(ir->d->dev, "SET_CARRIER requesting " ++ "%d Hz\n", carrier); ++ ++ /* Transmit new carrier to mce device */ ++ request_packet_async(ir, ir->usb_ep_out, ++ cmdbuf, sizeof(cmdbuf), ++ MCEUSB_OUTBOUND); ++ return carrier; ++ } ++ } ++ ++ return -EINVAL; ++ ++ } ++ ++ return carrier; ++} ++ ++ ++static int mceusb_lirc_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg) ++{ ++ int result; ++ unsigned int ivalue; ++ unsigned long lvalue; ++ struct mceusb_dev *ir = NULL; ++ ++ /* Retrieve lirc_driver data for the device */ ++ ir = lirc_get_pdata(filep); ++ if (!ir || !ir->usb_ep_out) ++ return -EFAULT; ++ ++ ++ switch (cmd) { ++ case LIRC_SET_TRANSMITTER_MASK: ++ ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ switch (ivalue) { ++ case 0x01: /* Transmitter 1 => 0x04 */ ++ case 0x02: /* Transmitter 2 => 0x02 */ ++ case 0x03: /* Transmitter 1 & 2 => 0x06 */ ++ set_transmitter_mask(ir, ivalue); ++ break; ++ ++ default: /* Unsupported transmitter mask */ ++ return MCE_MAX_CHANNELS; ++ } ++ ++ dev_dbg(ir->d->dev, ": SET_TRANSMITTERS mask=%d\n", ivalue); ++ break; ++ ++ case 
LIRC_GET_SEND_MODE: ++ ++ result = put_user(LIRC_SEND2MODE(LIRC_CAN_SEND_PULSE & ++ LIRC_CAN_SEND_MASK), ++ (unsigned long *) arg); ++ ++ if (result) ++ return result; ++ break; ++ ++ case LIRC_SET_SEND_MODE: ++ ++ result = get_user(lvalue, (unsigned long *) arg); ++ ++ if (result) ++ return result; ++ if (lvalue != (LIRC_MODE_PULSE&LIRC_CAN_SEND_MASK)) ++ return -EINVAL; ++ break; ++ ++ case LIRC_SET_SEND_CARRIER: ++ ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ ++ set_send_carrier(ir, ivalue); ++ break; ++ ++ default: ++ return lirc_dev_fop_ioctl(node, filep, cmd, arg); ++ } ++ ++ return 0; ++} ++ ++static struct file_operations lirc_fops = { ++ .owner = THIS_MODULE, ++ .write = mceusb_transmit_ir, ++ .ioctl = mceusb_lirc_ioctl, ++ .read = lirc_dev_fop_read, ++ .poll = lirc_dev_fop_poll, ++ .open = lirc_dev_fop_open, ++ .release = lirc_dev_fop_close, ++}; ++ ++static int mceusb_gen1_init(struct mceusb_dev *ir) ++{ ++ int i, ret; ++ char junk[64], data[8]; ++ int partial = 0; ++ ++ /* ++ * Clear off the first few messages. These look like calibration ++ * or test data, I can't really tell. This also flushes in case ++ * we have random ir data queued up. ++ */ ++ for (i = 0; i < 40; i++) ++ usb_bulk_msg(ir->usbdev, ++ usb_rcvbulkpipe(ir->usbdev, ++ ir->usb_ep_in->bEndpointAddress), ++ junk, 64, &partial, HZ * 10); ++ ++ ir->is_pulse = 1; ++ ++ memset(data, 0, 8); ++ ++ /* Get Status */ ++ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), ++ USB_REQ_GET_STATUS, USB_DIR_IN, ++ 0, 0, data, 2, HZ * 3); ++ ++ /* ret = usb_get_status( ir->usbdev, 0, 0, data ); */ ++ dev_dbg(ir->d->dev, "%s - ret = %d status = 0x%x 0x%x\n", __func__, ++ ret, data[0], data[1]); ++ ++ /* ++ * This is a strange one. 
They issue a set address to the device ++ * on the receive control pipe and expect a certain value pair back ++ */ ++ memset(data, 0, 8); ++ ++ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), ++ USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0, ++ data, 2, HZ * 3); ++ dev_dbg(ir->d->dev, "%s - ret = %d, devnum = %d\n", ++ __func__, ret, ir->usbdev->devnum); ++ dev_dbg(ir->d->dev, "%s - data[0] = %d, data[1] = %d\n", ++ __func__, data[0], data[1]); ++ ++ /* set feature: bit rate 38400 bps */ ++ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), ++ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, ++ 0xc04e, 0x0000, NULL, 0, HZ * 3); ++ ++ dev_dbg(ir->d->dev, "%s - ret = %d\n", __func__, ret); ++ ++ /* bRequest 4: set char length to 8 bits */ ++ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), ++ 4, USB_TYPE_VENDOR, ++ 0x0808, 0x0000, NULL, 0, HZ * 3); ++ dev_dbg(ir->d->dev, "%s - retB = %d\n", __func__, ret); ++ ++ /* bRequest 2: set handshaking to use DTR/DSR */ ++ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), ++ 2, USB_TYPE_VENDOR, ++ 0x0000, 0x0100, NULL, 0, HZ * 3); ++ dev_dbg(ir->d->dev, "%s - retC = %d\n", __func__, ret); ++ ++ return ret; ++ ++}; ++ ++static int mceusb_dev_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct usb_host_interface *idesc; ++ struct usb_endpoint_descriptor *ep = NULL; ++ struct usb_endpoint_descriptor *ep_in = NULL; ++ struct usb_endpoint_descriptor *ep_out = NULL; ++ struct usb_host_config *config; ++ struct mceusb_dev *ir = NULL; ++ struct lirc_driver *driver = NULL; ++ struct lirc_buffer *rbuf = NULL; ++ int devnum, pipe, maxp; ++ int minor = 0; ++ int i; ++ char buf[63], name[128] = ""; ++ int mem_failure = 0; ++ int is_pinnacle; ++ int is_microsoft_gen1; ++ ++ dev_dbg(&intf->dev, ": %s called\n", __func__); ++ ++ usb_reset_device(dev); ++ ++ config = dev->actconfig; ++ ++ idesc = 
intf->cur_altsetting; ++ ++ is_pinnacle = usb_match_id(intf, pinnacle_list) ? 1 : 0; ++ ++ is_microsoft_gen1 = usb_match_id(intf, microsoft_gen1_list) ? 1 : 0; ++ ++ /* step through the endpoints to find first bulk in and out endpoint */ ++ for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { ++ ep = &idesc->endpoint[i].desc; ++ ++ if ((ep_in == NULL) ++ && ((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ == USB_DIR_IN) ++ && (((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ == USB_ENDPOINT_XFER_BULK) ++ || ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ == USB_ENDPOINT_XFER_INT))) { ++ ++ dev_dbg(&intf->dev, ": acceptable inbound endpoint " ++ "found\n"); ++ ep_in = ep; ++ ep_in->bmAttributes = USB_ENDPOINT_XFER_INT; ++ if (is_pinnacle) ++ /* ++ * setting seems to 1 seem to cause issues with ++ * Pinnacle timing out on transfer. ++ */ ++ ep_in->bInterval = ep->bInterval; ++ else ++ ep_in->bInterval = 1; ++ } ++ ++ if ((ep_out == NULL) ++ && ((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ == USB_DIR_OUT) ++ && (((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ == USB_ENDPOINT_XFER_BULK) ++ || ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ++ == USB_ENDPOINT_XFER_INT))) { ++ ++ dev_dbg(&intf->dev, ": acceptable outbound endpoint " ++ "found\n"); ++ ep_out = ep; ++ ep_out->bmAttributes = USB_ENDPOINT_XFER_INT; ++ if (is_pinnacle) ++ /* ++ * setting seems to 1 seem to cause issues with ++ * Pinnacle timing out on transfer. 
++ */ ++ ep_out->bInterval = ep->bInterval; ++ else ++ ep_out->bInterval = 1; ++ } ++ } ++ if (ep_in == NULL) { ++ dev_dbg(&intf->dev, ": inbound and/or endpoint not found\n"); ++ return -ENODEV; ++ } ++ ++ devnum = dev->devnum; ++ pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress); ++ maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); ++ ++ mem_failure = 0; ++ ir = kzalloc(sizeof(struct mceusb_dev), GFP_KERNEL); ++ if (!ir) ++ goto mem_alloc_fail; ++ ++ driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); ++ if (!driver) ++ goto mem_alloc_fail; ++ ++ rbuf = kzalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!rbuf) ++ goto mem_alloc_fail; ++ ++ if (lirc_buffer_init(rbuf, sizeof(int), LIRCBUF_SIZE)) ++ goto mem_alloc_fail; ++ ++ ir->buf_in = usb_buffer_alloc(dev, maxp, GFP_ATOMIC, &ir->dma_in); ++ if (!ir->buf_in) ++ goto buf_in_alloc_fail; ++ ++ ir->urb_in = usb_alloc_urb(0, GFP_KERNEL); ++ if (!ir->urb_in) ++ goto urb_in_alloc_fail; ++ ++ strcpy(driver->name, DRIVER_NAME); ++ driver->minor = -1; ++ driver->features = LIRC_CAN_SEND_PULSE | ++ LIRC_CAN_SET_TRANSMITTER_MASK | ++ LIRC_CAN_REC_MODE2 | ++ LIRC_CAN_SET_SEND_CARRIER; ++ driver->data = ir; ++ driver->rbuf = rbuf; ++ driver->set_use_inc = &mceusb_ir_open; ++ driver->set_use_dec = &mceusb_ir_close; ++ driver->code_length = sizeof(int) * 8; ++ driver->fops = &lirc_fops; ++ driver->dev = &intf->dev; ++ driver->owner = THIS_MODULE; ++ ++ mutex_init(&ir->dev_lock); ++ init_waitqueue_head(&ir->wait_out); ++ ++ minor = lirc_register_driver(driver); ++ if (minor < 0) ++ goto lirc_register_fail; ++ ++ driver->minor = minor; ++ ir->d = driver; ++ ir->devnum = devnum; ++ ir->usbdev = dev; ++ ir->len_in = maxp; ++ ir->overflow_len = 0; ++ ir->flags.connected = 0; ++ ir->flags.pinnacle = is_pinnacle; ++ ir->flags.microsoft_gen1 = is_microsoft_gen1; ++ ir->flags.transmitter_mask_inverted = ++ usb_match_id(intf, transmitter_mask_list) ? 
0 : 1; ++ ++ ir->lircdata = PULSE_MASK; ++ ir->is_pulse = 0; ++ ++ /* ir->flags.transmitter_mask_inverted must be set */ ++ set_transmitter_mask(ir, MCE_DEFAULT_TX_MASK); ++ /* Saving usb interface data for use by the transmitter routine */ ++ ir->usb_ep_in = ep_in; ++ ir->usb_ep_out = ep_out; ++ ++ if (dev->descriptor.iManufacturer ++ && usb_string(dev, dev->descriptor.iManufacturer, ++ buf, sizeof(buf)) > 0) ++ strlcpy(name, buf, sizeof(name)); ++ if (dev->descriptor.iProduct ++ && usb_string(dev, dev->descriptor.iProduct, ++ buf, sizeof(buf)) > 0) ++ snprintf(name + strlen(name), sizeof(name) - strlen(name), ++ " %s", buf); ++ ++ /* inbound data */ ++ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, ++ maxp, (usb_complete_t) mceusb_dev_recv, ir, ep_in->bInterval); ++ ir->urb_in->transfer_dma = ir->dma_in; ++ ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ ++ /* initialize device */ ++ if (ir->flags.pinnacle) { ++ int usbret; ++ ++ /* ++ * I have no idea why but this reset seems to be crucial to ++ * getting the device to do outbound IO correctly - without ++ * this the device seems to hang, ignoring all input - although ++ * IR signals are correctly sent from the device, no input is ++ * interpreted by the device and the host never does the ++ * completion routine ++ */ ++ ++ usbret = usb_reset_configuration(dev); ++ dev_info(ir->d->dev, "usb reset config ret %x\n", usbret); ++ ++ /* ++ * its possible we really should wait for a return ++ * for each of these... 
++ */ ++ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); ++ request_packet_async(ir, ep_out, pin_init1, sizeof(pin_init1), ++ MCEUSB_OUTBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); ++ request_packet_async(ir, ep_out, pin_init2, sizeof(pin_init2), ++ MCEUSB_OUTBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); ++ request_packet_async(ir, ep_out, pin_init3, sizeof(pin_init3), ++ MCEUSB_OUTBOUND); ++ } else if (ir->flags.microsoft_gen1) { ++ /* original ms mce device requires some additional setup */ ++ mceusb_gen1_init(ir); ++ } else { ++ ++ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); ++ request_packet_async(ir, ep_out, init1, ++ sizeof(init1), MCEUSB_OUTBOUND); ++ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); ++ request_packet_async(ir, ep_out, init2, ++ sizeof(init2), MCEUSB_OUTBOUND); ++ } ++ ++ /* ++ * if we don't issue the correct number of receives (MCEUSB_INBOUND) ++ * for each outbound, then the first few ir pulses will be interpreted ++ * by the usb_async_callback routine - we should ensure we have the ++ * right amount OR less - as the mceusb_dev_recv routine will handle ++ * the control packets OK - they start with 0x9f - but the async ++ * callback doesn't handle ir pulse packets ++ */ ++ request_packet_async(ir, ep_in, NULL, maxp, 0); ++ ++ usb_set_intfdata(intf, ir); ++ ++ dev_info(ir->d->dev, "Registered %s on usb%d:%d\n", name, ++ dev->bus->busnum, devnum); ++ ++ return 0; ++ ++ /* Error-handling path */ ++lirc_register_fail: ++ usb_free_urb(ir->urb_in); ++urb_in_alloc_fail: ++ usb_buffer_free(dev, maxp, ir->buf_in, ir->dma_in); ++buf_in_alloc_fail: ++ lirc_buffer_free(rbuf); ++mem_alloc_fail: ++ kfree(rbuf); ++ kfree(driver); ++ kfree(ir); ++ dev_info(&intf->dev, "out of memory (code=%d)\n", mem_failure); ++ ++ return -ENOMEM; ++} ++ ++ ++static void mceusb_dev_disconnect(struct usb_interface 
*intf) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct mceusb_dev *ir = usb_get_intfdata(intf); ++ ++ usb_set_intfdata(intf, NULL); ++ ++ if (!ir || !ir->d) ++ return; ++ ++ ir->usbdev = NULL; ++ wake_up_all(&ir->wait_out); ++ ++ mutex_lock(&ir->dev_lock); ++ usb_kill_urb(ir->urb_in); ++ usb_free_urb(ir->urb_in); ++ usb_buffer_free(dev, ir->len_in, ir->buf_in, ir->dma_in); ++ mutex_unlock(&ir->dev_lock); ++ ++ unregister_from_lirc(ir); ++} ++ ++static int mceusb_dev_suspend(struct usb_interface *intf, pm_message_t message) ++{ ++ struct mceusb_dev *ir = usb_get_intfdata(intf); ++ dev_info(ir->d->dev, "suspend\n"); ++ usb_kill_urb(ir->urb_in); ++ return 0; ++} ++ ++static int mceusb_dev_resume(struct usb_interface *intf) ++{ ++ struct mceusb_dev *ir = usb_get_intfdata(intf); ++ dev_info(ir->d->dev, "resume\n"); ++ if (usb_submit_urb(ir->urb_in, GFP_ATOMIC)) ++ return -EIO; ++ return 0; ++} ++ ++static struct usb_driver mceusb_dev_driver = { ++ .name = DRIVER_NAME, ++ .probe = mceusb_dev_probe, ++ .disconnect = mceusb_dev_disconnect, ++ .suspend = mceusb_dev_suspend, ++ .resume = mceusb_dev_resume, ++ .reset_resume = mceusb_dev_resume, ++ .id_table = mceusb_dev_table ++}; ++ ++static int __init mceusb_dev_init(void) ++{ ++ int i; ++ ++ printk(KERN_INFO DRIVER_NAME ": " DRIVER_DESC " " DRIVER_VERSION "\n"); ++ printk(KERN_INFO DRIVER_NAME ": " DRIVER_AUTHOR "\n"); ++ if (debug) ++ printk(KERN_DEBUG DRIVER_NAME ": debug mode enabled\n"); ++ ++ i = usb_register(&mceusb_dev_driver); ++ if (i < 0) { ++ printk(KERN_ERR DRIVER_NAME ++ ": usb register failed, result = %d\n", i); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static void __exit mceusb_dev_exit(void) ++{ ++ usb_deregister(&mceusb_dev_driver); ++} ++ ++module_init(mceusb_dev_init); ++module_exit(mceusb_dev_exit); ++ ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(usb, mceusb_dev_table); ++/* this was originally 
lirc_mceusb2, lirc_mceusb and lirc_mceusb2 merged now */ ++MODULE_ALIAS("lirc_mceusb2"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Debug enabled or not"); +diff --git a/drivers/input/lirc/lirc_parallel.c b/drivers/input/lirc/lirc_parallel.c +new file mode 100644 +index 0000000..bb57b3e +--- /dev/null ++++ b/drivers/input/lirc/lirc_parallel.c +@@ -0,0 +1,709 @@ ++/* ++ * lirc_parallel.c ++ * ++ * lirc_parallel - device driver for infra-red signal receiving and ++ * transmitting unit built by the author ++ * ++ * Copyright (C) 1998 Christoph Bartelmus ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++/*** Includes ***/ ++ ++#ifdef CONFIG_SMP ++#error "--- Sorry, this driver is not SMP safe. 
---" ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++#include "lirc_parallel.h" ++ ++#define LIRC_DRIVER_NAME "lirc_parallel" ++ ++#ifndef LIRC_IRQ ++#define LIRC_IRQ 7 ++#endif ++#ifndef LIRC_PORT ++#define LIRC_PORT 0x378 ++#endif ++#ifndef LIRC_TIMER ++#define LIRC_TIMER 65536 ++#endif ++ ++/*** Global Variables ***/ ++ ++static int debug; ++static int check_pselecd; ++ ++unsigned int irq = LIRC_IRQ; ++unsigned int io = LIRC_PORT; ++#ifdef LIRC_TIMER ++unsigned int timer; ++unsigned int default_timer = LIRC_TIMER; ++#endif ++ ++#define WBUF_SIZE (256) ++#define RBUF_SIZE (256) /* this must be a power of 2 larger than 1 */ ++ ++static int wbuf[WBUF_SIZE]; ++static int rbuf[RBUF_SIZE]; ++ ++DECLARE_WAIT_QUEUE_HEAD(lirc_wait); ++ ++unsigned int rptr; ++unsigned int wptr; ++unsigned int lost_irqs; ++int is_open; ++ ++struct parport *pport; ++struct pardevice *ppdevice; ++int is_claimed; ++ ++unsigned int tx_mask = 1; ++ ++/*** Internal Functions ***/ ++ ++static unsigned int in(int offset) ++{ ++ switch (offset) { ++ case LIRC_LP_BASE: ++ return parport_read_data(pport); ++ case LIRC_LP_STATUS: ++ return parport_read_status(pport); ++ case LIRC_LP_CONTROL: ++ return parport_read_control(pport); ++ } ++ return 0; /* make compiler happy */ ++} ++ ++static void out(int offset, int value) ++{ ++ switch (offset) { ++ case LIRC_LP_BASE: ++ parport_write_data(pport, value); ++ break; ++ case LIRC_LP_CONTROL: ++ parport_write_control(pport, value); ++ break; ++ case LIRC_LP_STATUS: ++ printk(KERN_INFO "%s: attempt to write to status register\n", ++ LIRC_DRIVER_NAME); ++ break; ++ } ++} ++ ++static unsigned int lirc_get_timer(void) ++{ ++ return in(LIRC_PORT_TIMER) & LIRC_PORT_TIMER_BIT; ++} ++ ++static unsigned int lirc_get_signal(void) ++{ ++ return in(LIRC_PORT_SIGNAL) & 
LIRC_PORT_SIGNAL_BIT; ++} ++ ++static void lirc_on(void) ++{ ++ out(LIRC_PORT_DATA, tx_mask); ++} ++ ++static void lirc_off(void) ++{ ++ out(LIRC_PORT_DATA, 0); ++} ++ ++static unsigned int init_lirc_timer(void) ++{ ++ struct timeval tv, now; ++ unsigned int level, newlevel, timeelapsed, newtimer; ++ int count = 0; ++ ++ do_gettimeofday(&tv); ++ tv.tv_sec++; /* wait max. 1 sec. */ ++ level = lirc_get_timer(); ++ do { ++ newlevel = lirc_get_timer(); ++ if (level == 0 && newlevel != 0) ++ count++; ++ level = newlevel; ++ do_gettimeofday(&now); ++ } while (count < 1000 && (now.tv_sec < tv.tv_sec ++ || (now.tv_sec == tv.tv_sec ++ && now.tv_usec < tv.tv_usec))); ++ ++ timeelapsed = ((now.tv_sec + 1 - tv.tv_sec)*1000000 ++ + (now.tv_usec - tv.tv_usec)); ++ if (count >= 1000 && timeelapsed > 0) { ++ if (default_timer == 0) { ++ /* autodetect timer */ ++ newtimer = (1000000*count)/timeelapsed; ++ printk(KERN_INFO "%s: %u Hz timer detected\n", ++ LIRC_DRIVER_NAME, newtimer); ++ return newtimer; ++ } else { ++ newtimer = (1000000*count)/timeelapsed; ++ if (abs(newtimer - default_timer) > default_timer/10) { ++ /* bad timer */ ++ printk(KERN_NOTICE "%s: bad timer: %u Hz\n", ++ LIRC_DRIVER_NAME, newtimer); ++ printk(KERN_NOTICE "%s: using default timer: " ++ "%u Hz\n", ++ LIRC_DRIVER_NAME, default_timer); ++ return default_timer; ++ } else { ++ printk(KERN_INFO "%s: %u Hz timer detected\n", ++ LIRC_DRIVER_NAME, newtimer); ++ return newtimer; /* use detected value */ ++ } ++ } ++ } else { ++ printk(KERN_NOTICE "%s: no timer detected\n", LIRC_DRIVER_NAME); ++ return 0; ++ } ++} ++ ++static int lirc_claim(void) ++{ ++ if (parport_claim(ppdevice) != 0) { ++ printk(KERN_WARNING "%s: could not claim port\n", ++ LIRC_DRIVER_NAME); ++ printk(KERN_WARNING "%s: waiting for port becoming available" ++ "\n", LIRC_DRIVER_NAME); ++ if (parport_claim_or_block(ppdevice) < 0) { ++ printk(KERN_NOTICE "%s: could not claim port, giving" ++ " up\n", LIRC_DRIVER_NAME); ++ return 0; ++ } ++ } ++ 
out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP); ++ is_claimed = 1; ++ return 1; ++} ++ ++/*** interrupt handler ***/ ++ ++static void rbuf_write(int signal) ++{ ++ unsigned int nwptr; ++ ++ nwptr = (wptr + 1) & (RBUF_SIZE - 1); ++ if (nwptr == rptr) { ++ /* no new signals will be accepted */ ++ lost_irqs++; ++ printk(KERN_NOTICE "%s: buffer overrun\n", LIRC_DRIVER_NAME); ++ return; ++ } ++ rbuf[wptr] = signal; ++ wptr = nwptr; ++} ++ ++static void irq_handler(void *blah) ++{ ++ struct timeval tv; ++ static struct timeval lasttv; ++ static int init; ++ long signal; ++ int data; ++ unsigned int level, newlevel; ++ unsigned int timeout; ++ ++ if (!module_refcount(THIS_MODULE)) ++ return; ++ ++ if (!is_claimed) ++ return; ++ ++#if 0 ++ /* disable interrupt */ ++ disable_irq(irq); ++ out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ) & (~LP_PINTEN)); ++#endif ++ if (check_pselecd && (in(1) & LP_PSELECD)) ++ return; ++ ++#ifdef LIRC_TIMER ++ if (init) { ++ do_gettimeofday(&tv); ++ ++ signal = tv.tv_sec - lasttv.tv_sec; ++ if (signal > 15) ++ /* really long time */ ++ data = PULSE_MASK; ++ else ++ data = (int) (signal*1000000 + ++ tv.tv_usec - lasttv.tv_usec + ++ LIRC_SFH506_DELAY); ++ ++ rbuf_write(data); /* space */ ++ } else { ++ if (timer == 0) { ++ /* ++ * wake up; we'll lose this signal, but it will be ++ * garbage if the device is turned on anyway ++ */ ++ timer = init_lirc_timer(); ++ /* enable_irq(irq); */ ++ return; ++ } ++ init = 1; ++ } ++ ++ timeout = timer/10; /* timeout after 1/10 sec. 
*/ ++ signal = 1; ++ level = lirc_get_timer(); ++ do { ++ newlevel = lirc_get_timer(); ++ if (level == 0 && newlevel != 0) ++ signal++; ++ level = newlevel; ++ ++ /* giving up */ ++ if (signal > timeout ++ || (check_pselecd && (in(1) & LP_PSELECD))) { ++ signal = 0; ++ printk(KERN_NOTICE "%s: timeout\n", LIRC_DRIVER_NAME); ++ break; ++ } ++ } while (lirc_get_signal()); ++ ++ if (signal != 0) { ++ /* ajust value to usecs */ ++ unsigned long long helper; ++ ++ helper = ((unsigned long long) signal)*1000000; ++ do_div(helper, timer); ++ signal = (long) helper; ++ ++ if (signal > LIRC_SFH506_DELAY) ++ data = signal - LIRC_SFH506_DELAY; ++ else ++ data = 1; ++ rbuf_write(PULSE_BIT|data); /* pulse */ ++ } ++ do_gettimeofday(&lasttv); ++#else ++ /* add your code here */ ++#endif ++ ++ wake_up_interruptible(&lirc_wait); ++ ++ /* enable interrupt */ ++ /* ++ enable_irq(irq); ++ out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ)|LP_PINTEN); ++ */ ++} ++ ++/*** file operations ***/ ++ ++static loff_t lirc_lseek(struct file *filep, loff_t offset, int orig) ++{ ++ return -ESPIPE; ++} ++ ++static ssize_t lirc_read(struct file *filep, char *buf, size_t n, loff_t *ppos) ++{ ++ int result = 0; ++ int count = 0; ++ DECLARE_WAITQUEUE(wait, current); ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ ++ add_wait_queue(&lirc_wait, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (count < n) { ++ if (rptr != wptr) { ++ if (copy_to_user(buf+count, (char *) &rbuf[rptr], ++ sizeof(int))) { ++ result = -EFAULT; ++ break; ++ } ++ rptr = (rptr + 1) & (RBUF_SIZE - 1); ++ count += sizeof(int); ++ } else { ++ if (filep->f_flags & O_NONBLOCK) { ++ result = -EAGAIN; ++ break; ++ } ++ if (signal_pending(current)) { ++ result = -ERESTARTSYS; ++ break; ++ } ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ } ++ remove_wait_queue(&lirc_wait, &wait); ++ set_current_state(TASK_RUNNING); ++ return count ? 
count : result; ++} ++ ++static ssize_t lirc_write(struct file *filep, const char *buf, size_t n, ++ loff_t *ppos) ++{ ++ int count; ++ unsigned int i; ++ unsigned int level, newlevel; ++ unsigned long flags; ++ int counttimer; ++ ++ if (!is_claimed) ++ return -EBUSY; ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ ++ count = n / sizeof(int); ++ ++ if (count > WBUF_SIZE || count % 2 == 0) ++ return -EINVAL; ++ ++ if (copy_from_user(wbuf, buf, n)) ++ return -EFAULT; ++ ++#ifdef LIRC_TIMER ++ if (timer == 0) { ++ /* try again if device is ready */ ++ timer = init_lirc_timer(); ++ if (timer == 0) ++ return -EIO; ++ } ++ ++ /* adjust values from usecs */ ++ for (i = 0; i < count; i++) { ++ unsigned long long helper; ++ ++ helper = ((unsigned long long) wbuf[i])*timer; ++ do_div(helper, 1000000); ++ wbuf[i] = (int) helper; ++ } ++ ++ local_irq_save(flags); ++ i = 0; ++ while (i < count) { ++ level = lirc_get_timer(); ++ counttimer = 0; ++ lirc_on(); ++ do { ++ newlevel = lirc_get_timer(); ++ if (level == 0 && newlevel != 0) ++ counttimer++; ++ level = newlevel; ++ if (check_pselecd && (in(1) & LP_PSELECD)) { ++ lirc_off(); ++ local_irq_restore(flags); ++ return -EIO; ++ } ++ } while (counttimer < wbuf[i]); ++ i++; ++ ++ lirc_off(); ++ if (i == count) ++ break; ++ counttimer = 0; ++ do { ++ newlevel = lirc_get_timer(); ++ if (level == 0 && newlevel != 0) ++ counttimer++; ++ level = newlevel; ++ if (check_pselecd && (in(1) & LP_PSELECD)) { ++ local_irq_restore(flags); ++ return -EIO; ++ } ++ } while (counttimer < wbuf[i]); ++ i++; ++ } ++ local_irq_restore(flags); ++#else ++ /* place code that handles write without external timer here */ ++#endif ++ return n; ++} ++ ++static unsigned int lirc_poll(struct file *file, poll_table *wait) ++{ ++ poll_wait(file, &lirc_wait, wait); ++ if (rptr != wptr) ++ return POLLIN | POLLRDNORM; ++ return 0; ++} ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg) ++{ ++ int result; ++ 
unsigned long features = LIRC_CAN_SET_TRANSMITTER_MASK | ++ LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2; ++ unsigned long mode; ++ unsigned int ivalue; ++ ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ result = put_user(features, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ case LIRC_GET_SEND_MODE: ++ result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ case LIRC_GET_REC_MODE: ++ result = put_user(LIRC_MODE_MODE2, (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ case LIRC_SET_SEND_MODE: ++ result = get_user(mode, (unsigned long *) arg); ++ if (result) ++ return result; ++ if (mode != LIRC_MODE_PULSE) ++ return -EINVAL; ++ break; ++ case LIRC_SET_REC_MODE: ++ result = get_user(mode, (unsigned long *) arg); ++ if (result) ++ return result; ++ if (mode != LIRC_MODE_MODE2) ++ return -ENOSYS; ++ break; ++ case LIRC_SET_TRANSMITTER_MASK: ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ if ((ivalue & LIRC_PARALLEL_TRANSMITTER_MASK) != ivalue) ++ return LIRC_PARALLEL_MAX_TRANSMITTERS; ++ tx_mask = ivalue; ++ break; ++ default: ++ return -ENOIOCTLCMD; ++ } ++ return 0; ++} ++ ++static int lirc_open(struct inode *node, struct file *filep) ++{ ++ if (module_refcount(THIS_MODULE) || !lirc_claim()) ++ return -EBUSY; ++ ++ parport_enable_irq(pport); ++ ++ /* init read ptr */ ++ rptr = 0; ++ wptr = 0; ++ lost_irqs = 0; ++ ++ is_open = 1; ++ return 0; ++} ++ ++static int lirc_close(struct inode *node, struct file *filep) ++{ ++ if (is_claimed) { ++ is_claimed = 0; ++ parport_release(ppdevice); ++ } ++ is_open = 0; ++ return 0; ++} ++ ++static struct file_operations lirc_fops = { ++ .owner = THIS_MODULE, ++ .llseek = lirc_lseek, ++ .read = lirc_read, ++ .write = lirc_write, ++ .poll = lirc_poll, ++ .ioctl = lirc_ioctl, ++ .open = lirc_open, ++ .release = lirc_close ++}; ++ ++static int set_use_inc(void *data) ++{ ++ return 0; ++} ++ ++static void 
set_use_dec(void *data) ++{ ++} ++ ++static struct lirc_driver driver = { ++ .name = LIRC_DRIVER_NAME, ++ .minor = -1, ++ .code_length = 1, ++ .sample_rate = 0, ++ .data = NULL, ++ .add_to_buf = NULL, ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .fops = &lirc_fops, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++static int pf(void *handle); ++static void kf(void *handle); ++ ++static struct timer_list poll_timer; ++static void poll_state(unsigned long ignored); ++ ++static void poll_state(unsigned long ignored) ++{ ++ printk(KERN_NOTICE "%s: time\n", ++ LIRC_DRIVER_NAME); ++ del_timer(&poll_timer); ++ if (is_claimed) ++ return; ++ kf(NULL); ++ if (!is_claimed) { ++ printk(KERN_NOTICE "%s: could not claim port, giving up\n", ++ LIRC_DRIVER_NAME); ++ init_timer(&poll_timer); ++ poll_timer.expires = jiffies + HZ; ++ poll_timer.data = (unsigned long)current; ++ poll_timer.function = poll_state; ++ add_timer(&poll_timer); ++ } ++} ++ ++static int pf(void *handle) ++{ ++ parport_disable_irq(pport); ++ is_claimed = 0; ++ return 0; ++} ++ ++static void kf(void *handle) ++{ ++ if (!is_open) ++ return; ++ if (!lirc_claim()) ++ return; ++ parport_enable_irq(pport); ++ lirc_off(); ++ /* this is a bit annoying when you actually print...*/ ++ /* ++ printk(KERN_INFO "%s: reclaimed port\n", LIRC_DRIVER_NAME); ++ */ ++} ++ ++/*** module initialization and cleanup ***/ ++ ++static int __init lirc_parallel_init(void) ++{ ++ pport = parport_find_base(io); ++ if (pport == NULL) { ++ printk(KERN_NOTICE "%s: no port at %x found\n", ++ LIRC_DRIVER_NAME, io); ++ return -ENXIO; ++ } ++ ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME, ++ pf, kf, irq_handler, 0, NULL); ++ parport_put_port(pport); ++ if (ppdevice == NULL) { ++ printk(KERN_NOTICE "%s: parport_register_device() failed\n", ++ LIRC_DRIVER_NAME); ++ return -ENXIO; ++ } ++ if (parport_claim(ppdevice) != 0) ++ goto skip_init; ++ is_claimed = 1; ++ out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP); ++ 
++#ifdef LIRC_TIMER ++ if (debug) ++ out(LIRC_PORT_DATA, tx_mask); ++ ++ timer = init_lirc_timer(); ++ ++#if 0 /* continue even if device is offline */ ++ if (timer == 0) { ++ is_claimed = 0; ++ parport_release(pport); ++ parport_unregister_device(ppdevice); ++ return -EIO; ++ } ++ ++#endif ++ if (debug) ++ out(LIRC_PORT_DATA, 0); ++#endif ++ ++ is_claimed = 0; ++ parport_release(ppdevice); ++ skip_init: ++ driver.minor = lirc_register_driver(&driver); ++ if (driver.minor < 0) { ++ printk(KERN_NOTICE "%s: register_chrdev() failed\n", ++ LIRC_DRIVER_NAME); ++ parport_unregister_device(ppdevice); ++ return -EIO; ++ } ++ printk(KERN_INFO "%s: installed using port 0x%04x irq %d\n", ++ LIRC_DRIVER_NAME, io, irq); ++ return 0; ++} ++ ++static void __exit lirc_parallel_exit(void) ++{ ++ parport_unregister_device(ppdevice); ++ lirc_unregister_driver(driver.minor); ++} ++ ++module_init(lirc_parallel_init); ++module_exit(lirc_parallel_exit); ++ ++MODULE_DESCRIPTION("Infrared receiver driver for parallel ports."); ++MODULE_AUTHOR("Christoph Bartelmus"); ++MODULE_LICENSE("GPL"); ++ ++module_param(io, int, S_IRUGO); ++MODULE_PARM_DESC(io, "I/O address base (0x3bc, 0x378 or 0x278)"); ++ ++module_param(irq, int, S_IRUGO); ++MODULE_PARM_DESC(irq, "Interrupt (7 or 5)"); ++ ++module_param(tx_mask, int, S_IRUGO); ++MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++module_param(check_pselecd, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(check_pselecd, "Check for printer (default: 0)"); +diff --git a/drivers/input/lirc/lirc_parallel.h b/drivers/input/lirc/lirc_parallel.h +new file mode 100644 +index 0000000..4bed6af +--- /dev/null ++++ b/drivers/input/lirc/lirc_parallel.h +@@ -0,0 +1,26 @@ ++/* lirc_parallel.h */ ++ ++#ifndef _LIRC_PARALLEL_H ++#define _LIRC_PARALLEL_H ++ ++#include ++ ++#define LIRC_PORT_LEN 3 ++ ++#define LIRC_LP_BASE 0 ++#define LIRC_LP_STATUS 1
++#define LIRC_LP_CONTROL 2 ++ ++#define LIRC_PORT_DATA LIRC_LP_BASE /* base */ ++#define LIRC_PORT_TIMER LIRC_LP_STATUS /* status port */ ++#define LIRC_PORT_TIMER_BIT LP_PBUSY /* busy signal */ ++#define LIRC_PORT_SIGNAL LIRC_LP_STATUS /* status port */ ++#define LIRC_PORT_SIGNAL_BIT LP_PACK /* ack signal */ ++#define LIRC_PORT_IRQ LIRC_LP_CONTROL /* control port */ ++ ++#define LIRC_SFH506_DELAY 0 /* delay t_phl in usecs */ ++ ++#define LIRC_PARALLEL_MAX_TRANSMITTERS 8 ++#define LIRC_PARALLEL_TRANSMITTER_MASK ((1< ++ * Tim Davies ++ * ++ * This driver was derived from: ++ * Venky Raju ++ * "lirc_imon - "LIRC/VFD driver for Ahanix/Soundgraph IMON IR/VFD" ++ * Paul Miller 's 2003-2004 ++ * "lirc_atiusb - USB remote support for LIRC" ++ * Culver Consulting Services 's 2003 ++ * "Sasem OnAir VFD/IR USB driver" ++ * ++ * ++ * NOTE - The LCDproc iMon driver should work with this module. More info at ++ * http://www.frogstorm.info/sasem ++ */ ++ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++ ++#define MOD_AUTHOR "Oliver Stabel , " \ ++ "Tim Davies " ++#define MOD_DESC "USB Driver for Sasem Remote Controller V1.1" ++#define MOD_NAME "lirc_sasem" ++#define MOD_VERSION "0.5" ++ ++#define VFD_MINOR_BASE 144 /* Same as LCD */ ++#define DEVICE_NAME "lcd%d" ++ ++#define BUF_CHUNK_SIZE 8 ++#define BUF_SIZE 128 ++ ++#define IOCTL_LCD_CONTRAST 1 ++ ++/*** P R O T O T Y P E S ***/ ++ ++/* USB Callback prototypes */ ++static int sasem_probe(struct usb_interface *interface, ++ const struct usb_device_id *id); ++static void sasem_disconnect(struct usb_interface *interface); ++static void usb_rx_callback(struct urb *urb); ++static void usb_tx_callback(struct urb *urb); ++ ++/* VFD file_operations function prototypes */ ++static int vfd_open(struct inode *inode, struct file *file); ++static int vfd_ioctl(struct inode *inode, struct file *file, ++ unsigned cmd, unsigned long arg); ++static int vfd_close(struct inode *inode, struct file *file); ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos); ++ ++/* LIRC driver function prototypes */ ++static int ir_open(void *data); ++static void ir_close(void *data); ++ ++/* Driver init/exit prototypes */ ++static int __init sasem_init(void); ++static void __exit sasem_exit(void); ++ ++/*** G L O B A L S ***/ ++ ++struct sasem_context { ++ ++ struct usb_device *dev; ++ int vfd_isopen; /* VFD port has been opened */ ++ unsigned int vfd_contrast; /* VFD contrast */ ++ int ir_isopen; /* IR port has been opened */ ++ int dev_present; /* USB device presence */ ++ struct mutex ctx_lock; /* to lock this object */ ++ wait_queue_head_t remove_ok; /* For 
unexpected USB disconnects */ ++ ++ struct lirc_driver *driver; ++ struct usb_endpoint_descriptor *rx_endpoint; ++ struct usb_endpoint_descriptor *tx_endpoint; ++ struct urb *rx_urb; ++ struct urb *tx_urb; ++ unsigned char usb_rx_buf[8]; ++ unsigned char usb_tx_buf[8]; ++ ++ struct tx_t { ++ unsigned char data_buf[32]; /* user data buffer */ ++ struct completion finished; /* wait for write to finish */ ++ atomic_t busy; /* write in progress */ ++ int status; /* status of tx completion */ ++ } tx; ++ ++ /* for dealing with repeat codes (wish there was a toggle bit!) */ ++ struct timeval presstime; ++ char lastcode[8]; ++ int codesaved; ++}; ++ ++/* VFD file operations */ ++static struct file_operations vfd_fops = { ++ .owner = THIS_MODULE, ++ .open = &vfd_open, ++ .write = &vfd_write, ++ .ioctl = &vfd_ioctl, ++ .release = &vfd_close, ++}; ++ ++/* USB Device ID for Sasem USB Control Board */ ++static struct usb_device_id sasem_usb_id_table[] = { ++ /* Sasem USB Control Board */ ++ { USB_DEVICE(0x11ba, 0x0101) }, ++ /* Terminating entry */ ++ {} ++}; ++ ++/* USB Device data */ ++static struct usb_driver sasem_driver = { ++ .name = MOD_NAME, ++ .probe = sasem_probe, ++ .disconnect = sasem_disconnect, ++ .id_table = sasem_usb_id_table, ++}; ++ ++static struct usb_class_driver sasem_class = { ++ .name = DEVICE_NAME, ++ .fops = &vfd_fops, ++ .minor_base = VFD_MINOR_BASE, ++}; ++ ++/* to prevent races between open() and disconnect() */ ++static DEFINE_MUTEX(disconnect_lock); ++ ++static int debug; ++ ++ ++/*** M O D U L E C O D E ***/ ++ ++MODULE_AUTHOR(MOD_AUTHOR); ++MODULE_DESCRIPTION(MOD_DESC); ++MODULE_LICENSE("GPL"); ++module_param(debug, int, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)"); ++ ++static void delete_context(struct sasem_context *context) ++{ ++ usb_free_urb(context->tx_urb); /* VFD */ ++ usb_free_urb(context->rx_urb); /* IR */ ++ lirc_buffer_free(context->driver->rbuf); ++ kfree(context->driver->rbuf); ++ 
kfree(context->driver); ++ kfree(context); ++ ++ if (debug) ++ printk(KERN_INFO "%s: context deleted\n", __func__); ++} ++ ++static void deregister_from_lirc(struct sasem_context *context) ++{ ++ int retval; ++ int minor = context->driver->minor; ++ ++ retval = lirc_unregister_driver(minor); ++ if (retval) ++ err("%s: unable to deregister from lirc (%d)", ++ __func__, retval); ++ else ++ printk(KERN_INFO "Deregistered Sasem driver (minor:%d)\n", ++ minor); ++ ++} ++ ++/** ++ * Called when the VFD device (e.g. /dev/usb/lcd) ++ * is opened by the application. ++ */ ++static int vfd_open(struct inode *inode, struct file *file) ++{ ++ struct usb_interface *interface; ++ struct sasem_context *context = NULL; ++ int subminor; ++ int retval = 0; ++ ++ /* prevent races with disconnect */ ++ mutex_lock(&disconnect_lock); ++ ++ subminor = iminor(inode); ++ interface = usb_find_interface(&sasem_driver, subminor); ++ if (!interface) { ++ err("%s: could not find interface for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ context = usb_get_intfdata(interface); ++ ++ if (!context) { ++ err("%s: no context found for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ if (context->vfd_isopen) { ++ err("%s: VFD port is already open", __func__); ++ retval = -EBUSY; ++ } else { ++ context->vfd_isopen = 1; ++ file->private_data = context; ++ printk(KERN_INFO "VFD port opened\n"); ++ } ++ ++ mutex_unlock(&context->ctx_lock); ++ ++exit: ++ mutex_unlock(&disconnect_lock); ++ return retval; ++} ++ ++/** ++ * Called when the VFD device (e.g. /dev/usb/lcd) ++ * is closed by the application. 
++ */ ++static int vfd_ioctl(struct inode *inode, struct file *file, ++ unsigned cmd, unsigned long arg) ++{ ++ struct sasem_context *context = NULL; ++ ++ context = (struct sasem_context *) file->private_data; ++ ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ switch (cmd) { ++ case IOCTL_LCD_CONTRAST: ++ if (arg > 1000) ++ arg = 1000; ++ context->vfd_contrast = (unsigned int)arg; ++ break; ++ default: ++ printk(KERN_INFO "Unknown IOCTL command\n"); ++ mutex_unlock(&context->ctx_lock); ++ return -ENOIOCTLCMD; /* not supported */ ++ } ++ ++ mutex_unlock(&context->ctx_lock); ++ return 0; ++} ++ ++/** ++ * Called when the VFD device (e.g. /dev/usb/lcd) ++ * is closed by the application. ++ */ ++static int vfd_close(struct inode *inode, struct file *file) ++{ ++ struct sasem_context *context = NULL; ++ int retval = 0; ++ ++ context = (struct sasem_context *) file->private_data; ++ ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ if (!context->vfd_isopen) { ++ err("%s: VFD is not open", __func__); ++ retval = -EIO; ++ } else { ++ context->vfd_isopen = 0; ++ printk(KERN_INFO "VFD port closed\n"); ++ if (!context->dev_present && !context->ir_isopen) { ++ ++ /* Device disconnected before close and IR port is ++ * not open. If IR port is open, context will be ++ * deleted by ir_close. */ ++ mutex_unlock(&context->ctx_lock); ++ delete_context(context); ++ return retval; ++ } ++ } ++ ++ mutex_unlock(&context->ctx_lock); ++ return retval; ++} ++ ++/** ++ * Sends a packet to the VFD. 
++ */ ++static int send_packet(struct sasem_context *context) ++{ ++ unsigned int pipe; ++ int interval = 0; ++ int retval = 0; ++ ++ pipe = usb_sndintpipe(context->dev, ++ context->tx_endpoint->bEndpointAddress); ++ interval = context->tx_endpoint->bInterval; ++ ++ usb_fill_int_urb(context->tx_urb, context->dev, pipe, ++ context->usb_tx_buf, sizeof(context->usb_tx_buf), ++ usb_tx_callback, context, interval); ++ ++ context->tx_urb->actual_length = 0; ++ ++ init_completion(&context->tx.finished); ++ atomic_set(&(context->tx.busy), 1); ++ ++ retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); ++ if (retval) { ++ atomic_set(&(context->tx.busy), 0); ++ err("%s: error submitting urb (%d)", __func__, retval); ++ } else { ++ /* Wait for transmission to complete (or abort) */ ++ mutex_unlock(&context->ctx_lock); ++ wait_for_completion(&context->tx.finished); ++ mutex_lock(&context->ctx_lock); ++ ++ retval = context->tx.status; ++ if (retval) ++ err("%s: packet tx failed (%d)", __func__, retval); ++ } ++ ++ return retval; ++} ++ ++/** ++ * Writes data to the VFD. The Sasem VFD is 2x16 characters ++ * and requires data in 9 consecutive USB interrupt packets, ++ * each packet carrying 8 bytes. 
++ */ ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos) ++{ ++ int i; ++ int retval = 0; ++ struct sasem_context *context; ++ ++ context = (struct sasem_context *) file->private_data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ if (!context->dev_present) { ++ err("%s: no Sasem device present", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ if (n_bytes <= 0 || n_bytes > 32) { ++ err("%s: invalid payload size", __func__); ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ retval = copy_from_user(context->tx.data_buf, buf, n_bytes); ++ if (retval < 0) ++ goto exit; ++ ++ /* Pad with spaces */ ++ for (i = n_bytes; i < 32; ++i) ++ context->tx.data_buf[i] = ' '; ++ ++ /* Nine 8 byte packets to be sent */ ++ /* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0" ++ * will clear the VFD */ ++ for (i = 0; i < 9; i++) { ++ switch (i) { ++ case 0: ++ memcpy(context->usb_tx_buf, "\x07\0\0\0\0\0\0\0", 8); ++ context->usb_tx_buf[1] = (context->vfd_contrast) ? 
++ (0x2B - (context->vfd_contrast - 1) / 250) ++ : 0x2B; ++ break; ++ case 1: ++ memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8); ++ break; ++ case 2: ++ memcpy(context->usb_tx_buf, "\x0b\x01\0\0\0\0\0\0", 8); ++ break; ++ case 3: ++ memcpy(context->usb_tx_buf, context->tx.data_buf, 8); ++ break; ++ case 4: ++ memcpy(context->usb_tx_buf, ++ context->tx.data_buf + 8, 8); ++ break; ++ case 5: ++ memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8); ++ break; ++ case 6: ++ memcpy(context->usb_tx_buf, "\x0b\x02\0\0\0\0\0\0", 8); ++ break; ++ case 7: ++ memcpy(context->usb_tx_buf, ++ context->tx.data_buf + 16, 8); ++ break; ++ case 8: ++ memcpy(context->usb_tx_buf, ++ context->tx.data_buf + 24, 8); ++ break; ++ } ++ retval = send_packet(context); ++ if (retval) { ++ ++ err("%s: send packet failed for packet #%d", ++ __func__, i); ++ goto exit; ++ } ++ } ++exit: ++ ++ mutex_unlock(&context->ctx_lock); ++ ++ return (!retval) ? n_bytes : retval; ++} ++ ++/** ++ * Callback function for USB core API: transmit data ++ */ ++static void usb_tx_callback(struct urb *urb) ++{ ++ struct sasem_context *context; ++ ++ if (!urb) ++ return; ++ context = (struct sasem_context *) urb->context; ++ if (!context) ++ return; ++ ++ context->tx.status = urb->status; ++ ++ /* notify waiters that write has finished */ ++ atomic_set(&context->tx.busy, 0); ++ complete(&context->tx.finished); ++ ++ return; ++} ++ ++/** ++ * Called by lirc_dev when the application opens /dev/lirc ++ */ ++static int ir_open(void *data) ++{ ++ int retval = 0; ++ struct sasem_context *context; ++ ++ /* prevent races with disconnect */ ++ mutex_lock(&disconnect_lock); ++ ++ context = (struct sasem_context *) data; ++ ++ mutex_lock(&context->ctx_lock); ++ ++ if (context->ir_isopen) { ++ err("%s: IR port is already open", __func__); ++ retval = -EBUSY; ++ goto exit; ++ } ++ ++ usb_fill_int_urb(context->rx_urb, context->dev, ++ usb_rcvintpipe(context->dev, ++ context->rx_endpoint->bEndpointAddress), ++ 
context->usb_rx_buf, sizeof(context->usb_rx_buf), ++ usb_rx_callback, context, context->rx_endpoint->bInterval); ++ ++ retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); ++ ++ if (retval) ++ err("%s: usb_submit_urb failed for ir_open (%d)", ++ __func__, retval); ++ else { ++ context->ir_isopen = 1; ++ printk(KERN_INFO "IR port opened\n"); ++ } ++ ++exit: ++ mutex_unlock(&context->ctx_lock); ++ ++ mutex_unlock(&disconnect_lock); ++ return retval; ++} ++ ++/** ++ * Called by lirc_dev when the application closes /dev/lirc ++ */ ++static void ir_close(void *data) ++{ ++ struct sasem_context *context; ++ ++ context = (struct sasem_context *)data; ++ if (!context) { ++ err("%s: no context for device", __func__); ++ return; ++ } ++ ++ mutex_lock(&context->ctx_lock); ++ ++ usb_kill_urb(context->rx_urb); ++ context->ir_isopen = 0; ++ printk(KERN_INFO "IR port closed\n"); ++ ++ if (!context->dev_present) { ++ ++ /* ++ * Device disconnected while IR port was ++ * still open. Driver was not deregistered ++ * at disconnect time, so do it now. ++ */ ++ deregister_from_lirc(context); ++ ++ if (!context->vfd_isopen) { ++ ++ mutex_unlock(&context->ctx_lock); ++ delete_context(context); ++ return; ++ } ++ /* If VFD port is open, context will be deleted by vfd_close */ ++ } ++ ++ mutex_unlock(&context->ctx_lock); ++ return; ++} ++ ++/** ++ * Process the incoming packet ++ */ ++static void incoming_packet(struct sasem_context *context, ++ struct urb *urb) ++{ ++ int len = urb->actual_length; ++ unsigned char *buf = urb->transfer_buffer; ++ long ms; ++ struct timeval tv; ++ ++ if (len != 8) { ++ printk(KERN_WARNING "%s: invalid incoming packet size (%d)\n", ++ __func__, len); ++ return; ++ } ++ ++#ifdef DEBUG ++ int i; ++ for (i = 0; i < 8; ++i) ++ printk(KERN_INFO "%02x ", buf[i]); ++ printk(KERN_INFO "\n"); ++#endif ++ ++ /* ++ * Lirc could deal with the repeat code, but we really need to block it ++ * if it arrives too late. Otherwise we could repeat the wrong code.
++ */ ++ ++ /* get the time since the last button press */ ++ do_gettimeofday(&tv); ++ ms = (tv.tv_sec - context->presstime.tv_sec) * 1000 + ++ (tv.tv_usec - context->presstime.tv_usec) / 1000; ++ ++ if (memcmp(buf, "\x08\0\0\0\0\0\0\0", 8) == 0) { ++ /* ++ * the repeat code is being sent, so we copy ++ * the old code to LIRC ++ */ ++ ++ /* ++ * NOTE: Only if the last code was less than 250ms ago ++ * - no one should be able to push another (undetected) button ++ * in that time and then get a false repeat of the previous ++ * press but it is long enough for a genuine repeat ++ */ ++ if ((ms < 250) && (context->codesaved != 0)) { ++ memcpy(buf, &context->lastcode, 8); ++ context->presstime.tv_sec = tv.tv_sec; ++ context->presstime.tv_usec = tv.tv_usec; ++ } ++ } else { ++ /* save the current valid code for repeats */ ++ memcpy(&context->lastcode, buf, 8); ++ /* ++ * set flag to signal a valid code was save; ++ * just for safety reasons ++ */ ++ context->codesaved = 1; ++ context->presstime.tv_sec = tv.tv_sec; ++ context->presstime.tv_usec = tv.tv_usec; ++ } ++ ++ lirc_buffer_write(context->driver->rbuf, buf); ++ wake_up(&context->driver->rbuf->wait_poll); ++} ++ ++/** ++ * Callback function for USB core API: receive data ++ */ ++static void usb_rx_callback(struct urb *urb) ++{ ++ struct sasem_context *context; ++ ++ if (!urb) ++ return; ++ context = (struct sasem_context *) urb->context; ++ if (!context) ++ return; ++ ++ switch (urb->status) { ++ ++ case -ENOENT: /* usbcore unlink successful! 
*/ ++ return; ++ ++ case 0: ++ if (context->ir_isopen) ++ incoming_packet(context, urb); ++ break; ++ ++ default: ++ printk(KERN_WARNING "%s: status (%d): ignored", ++ __func__, urb->status); ++ break; ++ } ++ ++ usb_submit_urb(context->rx_urb, GFP_ATOMIC); ++ return; ++} ++ ++ ++ ++/** ++ * Callback function for USB core API: Probe ++ */ ++static int sasem_probe(struct usb_interface *interface, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = NULL; ++ struct usb_host_interface *iface_desc = NULL; ++ struct usb_endpoint_descriptor *rx_endpoint = NULL; ++ struct usb_endpoint_descriptor *tx_endpoint = NULL; ++ struct urb *rx_urb = NULL; ++ struct urb *tx_urb = NULL; ++ struct lirc_driver *driver = NULL; ++ struct lirc_buffer *rbuf = NULL; ++ int lirc_minor = 0; ++ int num_endpoints; ++ int retval = 0; ++ int vfd_ep_found; ++ int ir_ep_found; ++ int alloc_status; ++ struct sasem_context *context = NULL; ++ int i; ++ ++ printk(KERN_INFO "%s: found Sasem device\n", __func__); ++ ++ ++ dev = usb_get_dev(interface_to_usbdev(interface)); ++ iface_desc = interface->cur_altsetting; ++ num_endpoints = iface_desc->desc.bNumEndpoints; ++ ++ /* ++ * Scan the endpoint list and set: ++ * first input endpoint = IR endpoint ++ * first output endpoint = VFD endpoint ++ */ ++ ++ ir_ep_found = 0; ++ vfd_ep_found = 0; ++ ++ for (i = 0; i < num_endpoints && !(ir_ep_found && vfd_ep_found); ++i) { ++ ++ struct usb_endpoint_descriptor *ep; ++ int ep_dir; ++ int ep_type; ++ ep = &iface_desc->endpoint [i].desc; ++ ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; ++ ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ ++ if (!ir_ep_found && ++ ep_dir == USB_DIR_IN && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ ++ rx_endpoint = ep; ++ ir_ep_found = 1; ++ if (debug) ++ printk(KERN_INFO "%s: found IR endpoint\n", ++ __func__); ++ ++ } else if (!vfd_ep_found && ++ ep_dir == USB_DIR_OUT && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ ++ tx_endpoint = ep; ++ vfd_ep_found = 
1; ++ if (debug) ++ printk(KERN_INFO "%s: found VFD endpoint\n", ++ __func__); ++ } ++ } ++ ++ /* Input endpoint is mandatory */ ++ if (!ir_ep_found) { ++ ++ err("%s: no valid input (IR) endpoint found.", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ if (!vfd_ep_found) ++ printk(KERN_INFO "%s: no valid output (VFD) endpoint found.\n", ++ __func__); ++ ++ ++ /* Allocate memory */ ++ alloc_status = 0; ++ ++ context = kzalloc(sizeof(struct sasem_context), GFP_KERNEL); ++ if (!context) { ++ err("%s: kzalloc failed for context", __func__); ++ alloc_status = 1; ++ goto alloc_status_switch; ++ } ++ driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); ++ if (!driver) { ++ err("%s: kzalloc failed for lirc_driver", __func__); ++ alloc_status = 2; ++ goto alloc_status_switch; ++ } ++ rbuf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!rbuf) { ++ err("%s: kmalloc failed for lirc_buffer", __func__); ++ alloc_status = 3; ++ goto alloc_status_switch; ++ } ++ if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) { ++ err("%s: lirc_buffer_init failed", __func__); ++ alloc_status = 4; ++ goto alloc_status_switch; ++ } ++ rx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!rx_urb) { ++ err("%s: usb_alloc_urb failed for IR urb", __func__); ++ alloc_status = 5; ++ goto alloc_status_switch; ++ } ++ if (vfd_ep_found) { ++ tx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!tx_urb) { ++ err("%s: usb_alloc_urb failed for VFD urb", ++ __func__); ++ alloc_status = 6; ++ goto alloc_status_switch; ++ } ++ } ++ ++ mutex_init(&context->ctx_lock); ++ ++ strcpy(driver->name, MOD_NAME); ++ driver->minor = -1; ++ driver->code_length = 64; ++ driver->sample_rate = 0; ++ driver->features = LIRC_CAN_REC_LIRCCODE; ++ driver->data = context; ++ driver->rbuf = rbuf; ++ driver->set_use_inc = ir_open; ++ driver->set_use_dec = ir_close; ++ driver->dev = &interface->dev; ++ driver->owner = THIS_MODULE; ++ ++ mutex_lock(&context->ctx_lock); ++ ++ lirc_minor = lirc_register_driver(driver); ++ 
if (lirc_minor < 0) { ++ err("%s: lirc_register_driver failed", __func__); ++ alloc_status = 7; ++ mutex_unlock(&context->ctx_lock); ++ } else ++ printk(KERN_INFO "%s: Registered Sasem driver (minor:%d)\n", ++ __func__, lirc_minor); ++ ++alloc_status_switch: ++ ++ switch (alloc_status) { ++ ++ case 7: ++ if (vfd_ep_found) ++ usb_free_urb(tx_urb); ++ case 6: ++ usb_free_urb(rx_urb); ++ case 5: ++ lirc_buffer_free(rbuf); ++ case 4: ++ kfree(rbuf); ++ case 3: ++ kfree(driver); ++ case 2: ++ kfree(context); ++ context = NULL; ++ case 1: ++ retval = -ENOMEM; ++ goto exit; ++ } ++ ++ /* Needed while unregistering! */ ++ driver->minor = lirc_minor; ++ ++ context->dev = dev; ++ context->dev_present = 1; ++ context->rx_endpoint = rx_endpoint; ++ context->rx_urb = rx_urb; ++ if (vfd_ep_found) { ++ context->tx_endpoint = tx_endpoint; ++ context->tx_urb = tx_urb; ++ context->vfd_contrast = 1000; /* range 0 - 1000 */ ++ } ++ context->driver = driver; ++ ++ usb_set_intfdata(interface, context); ++ ++ if (vfd_ep_found) { ++ ++ if (debug) ++ printk(KERN_INFO "Registering VFD with sysfs\n"); ++ if (usb_register_dev(interface, &sasem_class)) ++ /* Not a fatal error, so ignore */ ++ printk(KERN_INFO "%s: could not get a minor number " ++ "for VFD\n", __func__); ++ } ++ ++ printk(KERN_INFO "%s: Sasem device on usb<%d:%d> initialized\n", ++ __func__, dev->bus->busnum, dev->devnum); ++ ++ mutex_unlock(&context->ctx_lock); ++exit: ++ return retval; ++} ++ ++/** ++ * Callback function for USB core API: disonnect ++ */ ++static void sasem_disconnect(struct usb_interface *interface) ++{ ++ struct sasem_context *context; ++ ++ /* prevent races with ir_open()/vfd_open() */ ++ mutex_lock(&disconnect_lock); ++ ++ context = usb_get_intfdata(interface); ++ mutex_lock(&context->ctx_lock); ++ ++ printk(KERN_INFO "%s: Sasem device disconnected\n", __func__); ++ ++ usb_set_intfdata(interface, NULL); ++ context->dev_present = 0; ++ ++ /* Stop reception */ ++ usb_kill_urb(context->rx_urb); ++ ++ /* 
Abort ongoing write */ ++ if (atomic_read(&context->tx.busy)) { ++ ++ usb_kill_urb(context->tx_urb); ++ wait_for_completion(&context->tx.finished); ++ } ++ ++ /* De-register from lirc_dev if IR port is not open */ ++ if (!context->ir_isopen) ++ deregister_from_lirc(context); ++ ++ usb_deregister_dev(interface, &sasem_class); ++ ++ mutex_unlock(&context->ctx_lock); ++ ++ if (!context->ir_isopen && !context->vfd_isopen) ++ delete_context(context); ++ ++ mutex_unlock(&disconnect_lock); ++} ++ ++static int __init sasem_init(void) ++{ ++ int rc; ++ ++ printk(KERN_INFO MOD_DESC ", v" MOD_VERSION "\n"); ++ printk(KERN_INFO MOD_AUTHOR "\n"); ++ ++ rc = usb_register(&sasem_driver); ++ if (rc < 0) { ++ err("%s: usb register failed (%d)", __func__, rc); ++ return -ENODEV; ++ } ++ return 0; ++} ++ ++static void __exit sasem_exit(void) ++{ ++ usb_deregister(&sasem_driver); ++ printk(KERN_INFO "module removed. Goodbye!\n"); ++} ++ ++ ++module_init(sasem_init); ++module_exit(sasem_exit); +diff --git a/drivers/input/lirc/lirc_serial.c b/drivers/input/lirc/lirc_serial.c +new file mode 100644 +index 0000000..f4fcc37 +--- /dev/null ++++ b/drivers/input/lirc/lirc_serial.c +@@ -0,0 +1,1317 @@ ++/* ++ * lirc_serial.c ++ * ++ * lirc_serial - Device driver that records pulse- and pause-lengths ++ * (space-lengths) between DDCD event on a serial port. ++ * ++ * Copyright (C) 1996,97 Ralph Metzler ++ * Copyright (C) 1998 Trent Piepho ++ * Copyright (C) 1998 Ben Pfaff ++ * Copyright (C) 1999 Christoph Bartelmus ++ * Copyright (C) 2007 Andrei Tanas (suspend/resume support) ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++/* ++ * Steve's changes to improve transmission fidelity: ++ * - for systems with the rdtsc instruction and the clock counter, a ++ * send_pule that times the pulses directly using the counter. ++ * This means that the LIRC_SERIAL_TRANSMITTER_LATENCY fudge is ++ * not needed. Measurement shows very stable waveform, even where ++ * PCI activity slows the access to the UART, which trips up other ++ * versions. ++ * - For other system, non-integer-microsecond pulse/space lengths, ++ * done using fixed point binary. So, much more accurate carrier ++ * frequency. ++ * - fine tuned transmitter latency, taking advantage of fractional ++ * microseconds in previous change ++ * - Fixed bug in the way transmitter latency was accounted for by ++ * tuning the pulse lengths down - the send_pulse routine ignored ++ * this overhead as it timed the overall pulse length - so the ++ * pulse frequency was right but overall pulse length was too ++ * long. Fixed by accounting for latency on each pulse/space ++ * iteration. 
++ * ++ * Steve Davies July 2001 ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_LIRC_SERIAL_NSLU2 ++#include ++#endif ++/* From Intel IXP42X Developer's Manual (#252480-005): */ ++/* ftp://download.intel.com/design/network/manuals/25248005.pdf */ ++#define UART_IE_IXP42X_UUE 0x40 /* IXP42X UART Unit enable */ ++#define UART_IE_IXP42X_RTOIE 0x10 /* IXP42X Receiver Data Timeout int.enable */ ++ ++#include ++#include "lirc_dev.h" ++ ++#define LIRC_DRIVER_NAME "lirc_serial" ++ ++struct lirc_serial { ++ int signal_pin; ++ int signal_pin_change; ++ u8 on; ++ u8 off; ++ long (*send_pulse)(unsigned long length); ++ void (*send_space)(long length); ++ int features; ++ spinlock_t lock; ++}; ++ ++#define LIRC_HOMEBREW 0 ++#define LIRC_IRDEO 1 ++#define LIRC_IRDEO_REMOTE 2 ++#define LIRC_ANIMAX 3 ++#define LIRC_IGOR 4 ++#define LIRC_NSLU2 5 ++ ++/*** module parameters ***/ ++static int type; ++static int io; ++static int irq; ++static int iommap; ++static int ioshift; ++static int softcarrier = 1; ++static int share_irq; ++static int debug; ++static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */ ++static int txsense; /* 0 = active high, 1 = active low */ ++ ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \ ++ fmt, ## args); \ ++ } while (0) ++ ++/* forward declarations */ ++static long send_pulse_irdeo(unsigned long length); ++static long send_pulse_homebrew(unsigned long length); ++static void send_space_irdeo(long length); ++static void send_space_homebrew(long length); ++ ++static struct lirc_serial hardware[] = { ++ [LIRC_HOMEBREW] = { ++ .signal_pin = UART_MSR_DCD, ++ .signal_pin_change = UART_MSR_DDCD, ++ .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR), ++ .off = (UART_MCR_RTS | UART_MCR_OUT2), ++ .send_pulse = send_pulse_homebrew, ++ .send_space = send_space_homebrew, ++#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER ++ .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | ++ LIRC_CAN_SET_SEND_CARRIER | ++ LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) ++#else ++ .features = LIRC_CAN_REC_MODE2 ++#endif ++ }, ++ ++ [LIRC_IRDEO] = { ++ .signal_pin = UART_MSR_DSR, ++ .signal_pin_change = UART_MSR_DDSR, ++ .on = UART_MCR_OUT2, ++ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2), ++ .send_pulse = send_pulse_irdeo, ++ .send_space = send_space_irdeo, ++ .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | ++ LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) ++ }, ++ ++ [LIRC_IRDEO_REMOTE] = { ++ .signal_pin = UART_MSR_DSR, ++ .signal_pin_change = UART_MSR_DDSR, ++ .on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2), ++ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2), ++ .send_pulse = send_pulse_irdeo, ++ .send_space = send_space_irdeo, ++ .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | ++ LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) ++ }, ++ ++ [LIRC_ANIMAX] = { ++ .signal_pin = UART_MSR_DCD, ++ .signal_pin_change = UART_MSR_DDCD, ++ .on = 0, ++ .off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2), ++ .send_pulse = NULL, ++ .send_space = NULL, ++ .features = LIRC_CAN_REC_MODE2 ++ }, ++ ++ [LIRC_IGOR] = { ++ .signal_pin = UART_MSR_DSR, ++ .signal_pin_change = UART_MSR_DDSR, ++ .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR), ++ 
.off = (UART_MCR_RTS | UART_MCR_OUT2), ++ .send_pulse = send_pulse_homebrew, ++ .send_space = send_space_homebrew, ++#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER ++ .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | ++ LIRC_CAN_SET_SEND_CARRIER | ++ LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) ++#else ++ .features = LIRC_CAN_REC_MODE2 ++#endif ++ }, ++ ++#ifdef CONFIG_LIRC_SERIAL_NSLU2 ++ /* ++ * Modified Linksys Network Storage Link USB 2.0 (NSLU2): ++ * We receive on CTS of the 2nd serial port (R142,LHS), we ++ * transmit with a IR diode between GPIO[1] (green status LED), ++ * and ground (Matthias Goebl ). ++ * See also http://www.nslu2-linux.org for this device ++ */ ++ [LIRC_NSLU2] = { ++ .signal_pin = UART_MSR_CTS, ++ .signal_pin_change = UART_MSR_DCTS, ++ .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR), ++ .off = (UART_MCR_RTS | UART_MCR_OUT2), ++ .send_pulse = send_pulse_homebrew, ++ .send_space = send_space_homebrew, ++#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER ++ .features = (LIRC_CAN_SET_SEND_DUTY_CYCLE | ++ LIRC_CAN_SET_SEND_CARRIER | ++ LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2) ++#else ++ .features = LIRC_CAN_REC_MODE2 ++#endif ++ }, ++#endif ++ ++}; ++ ++#define RS_ISR_PASS_LIMIT 256 ++ ++/* ++ * A long pulse code from a remote might take up to 300 bytes. The ++ * daemon should read the bytes as soon as they are generated, so take ++ * the number of keys you think you can push before the daemon runs ++ * and multiply by 300. The driver will warn you if you overrun this ++ * buffer. If you have a slow computer or non-busmastering IDE disks, ++ * maybe you will need to increase this. ++ */ ++ ++/* This MUST be a power of two! It has to be larger than 1 as well. 
*/ ++ ++#define RBUF_LEN 256 ++#define WBUF_LEN 256 ++ ++static struct timeval lasttv = {0, 0}; ++ ++static struct lirc_buffer rbuf; ++ ++static int wbuf[WBUF_LEN]; ++ ++static unsigned int freq = 38000; ++static unsigned int duty_cycle = 50; ++ ++/* Initialized in init_timing_params() */ ++static unsigned long period; ++static unsigned long pulse_width; ++static unsigned long space_width; ++ ++#if defined(__i386__) ++/* ++ * From: ++ * Linux I/O port programming mini-HOWTO ++ * Author: Riku Saikkonen ++ * v, 28 December 1997 ++ * ++ * [...] ++ * Actually, a port I/O instruction on most ports in the 0-0x3ff range ++ * takes almost exactly 1 microsecond, so if you're, for example, using ++ * the parallel port directly, just do additional inb()s from that port ++ * to delay. ++ * [...] ++ */ ++/* transmitter latency 1.5625us 0x1.90 - this figure arrived at from ++ * comment above plus trimming to match actual measured frequency. ++ * This will be sensitive to cpu speed, though hopefully most of the 1.5us ++ * is spent in the uart access. Still - for reference test machine was a ++ * 1.13GHz Athlon system - Steve ++ */ ++ ++/* ++ * changed from 400 to 450 as this works better on slower machines; ++ * faster machines will use the rdtsc code anyway ++ */ ++#define LIRC_SERIAL_TRANSMITTER_LATENCY 450 ++ ++#else ++ ++/* does anybody have information on other platforms ? */ ++/* 256 = 1<<8 */ ++#define LIRC_SERIAL_TRANSMITTER_LATENCY 256 ++ ++#endif /* __i386__ */ ++/* ++ * FIXME: should we be using hrtimers instead of this ++ * LIRC_SERIAL_TRANSMITTER_LATENCY nonsense? 
++ */ ++ ++/* fetch serial input packet (1 byte) from register offset */ ++static u8 sinp(int offset) ++{ ++ if (iommap != 0) ++ /* the register is memory-mapped */ ++ offset <<= ioshift; ++ ++ return inb(io + offset); ++} ++ ++/* write serial output packet (1 byte) of value to register offset */ ++static void soutp(int offset, u8 value) ++{ ++ if (iommap != 0) ++ /* the register is memory-mapped */ ++ offset <<= ioshift; ++ ++ outb(value, io + offset); ++} ++ ++static void on(void) ++{ ++#ifdef CONFIG_LIRC_SERIAL_NSLU2 ++ /* ++ * On NSLU2, we put the transmit diode between the output of the green ++ * status LED and ground ++ */ ++ if (type == LIRC_NSLU2) { ++ gpio_line_set(NSLU2_LED_GRN, IXP4XX_GPIO_LOW); ++ return; ++ } ++#endif ++ if (txsense) ++ soutp(UART_MCR, hardware[type].off); ++ else ++ soutp(UART_MCR, hardware[type].on); ++} ++ ++static void off(void) ++{ ++#ifdef CONFIG_LIRC_SERIAL_NSLU2 ++ if (type == LIRC_NSLU2) { ++ gpio_line_set(NSLU2_LED_GRN, IXP4XX_GPIO_HIGH); ++ return; ++ } ++#endif ++ if (txsense) ++ soutp(UART_MCR, hardware[type].on); ++ else ++ soutp(UART_MCR, hardware[type].off); ++} ++ ++#ifndef MAX_UDELAY_MS ++#define MAX_UDELAY_US 5000 ++#else ++#define MAX_UDELAY_US (MAX_UDELAY_MS*1000) ++#endif ++ ++static void safe_udelay(unsigned long usecs) ++{ ++ while (usecs > MAX_UDELAY_US) { ++ udelay(MAX_UDELAY_US); ++ usecs -= MAX_UDELAY_US; ++ } ++ udelay(usecs); ++} ++ ++#ifdef USE_RDTSC ++/* ++ * This is an overflow/precision juggle, complicated in that we can't ++ * do long long divide in the kernel ++ */ ++ ++/* ++ * When we use the rdtsc instruction to measure clocks, we keep the ++ * pulse and space widths as clock cycles. 
As this is CPU speed ++ * dependent, the widths must be calculated in init_port and ioctl ++ * time ++ */ ++ ++/* So send_pulse can quickly convert microseconds to clocks */ ++static unsigned long conv_us_to_clocks; ++ ++static int init_timing_params(unsigned int new_duty_cycle, ++ unsigned int new_freq) ++{ ++ unsigned long long loops_per_sec, work; ++ ++ duty_cycle = new_duty_cycle; ++ freq = new_freq; ++ ++ loops_per_sec = current_cpu_data.loops_per_jiffy; ++ loops_per_sec *= HZ; ++ ++ /* How many clocks in a microsecond?, avoiding long long divide */ ++ work = loops_per_sec; ++ work *= 4295; /* 4295 = 2^32 / 1e6 */ ++ conv_us_to_clocks = (work >> 32); ++ ++ /* ++ * Carrier period in clocks, approach good up to 32GHz clock, ++ * gets carrier frequency within 8Hz ++ */ ++ period = loops_per_sec >> 3; ++ period /= (freq >> 3); ++ ++ /* Derive pulse and space from the period */ ++ pulse_width = period * duty_cycle / 100; ++ space_width = period - pulse_width; ++ dprintk("in init_timing_params, freq=%d, duty_cycle=%d, " ++ "clk/jiffy=%ld, pulse=%ld, space=%ld, " ++ "conv_us_to_clocks=%ld\n", ++ freq, duty_cycle, current_cpu_data.loops_per_jiffy, ++ pulse_width, space_width, conv_us_to_clocks); ++ return 0; ++} ++#else /* ! USE_RDTSC */ ++static int init_timing_params(unsigned int new_duty_cycle, ++ unsigned int new_freq) ++{ ++/* ++ * period, pulse/space width are kept with 8 binary places - ++ * IE multiplied by 256. 
++ */ ++ if (256 * 1000000L / new_freq * new_duty_cycle / 100 <= ++ LIRC_SERIAL_TRANSMITTER_LATENCY) ++ return -EINVAL; ++ if (256 * 1000000L / new_freq * (100 - new_duty_cycle) / 100 <= ++ LIRC_SERIAL_TRANSMITTER_LATENCY) ++ return -EINVAL; ++ duty_cycle = new_duty_cycle; ++ freq = new_freq; ++ period = 256 * 1000000L / freq; ++ pulse_width = period * duty_cycle / 100; ++ space_width = period - pulse_width; ++ dprintk("in init_timing_params, freq=%d pulse=%ld, " ++ "space=%ld\n", freq, pulse_width, space_width); ++ return 0; ++} ++#endif /* USE_RDTSC */ ++ ++ ++/* return value: space length delta */ ++ ++static long send_pulse_irdeo(unsigned long length) ++{ ++ long rawbits, ret; ++ int i; ++ unsigned char output; ++ unsigned char chunk, shifted; ++ ++ /* how many bits have to be sent ? */ ++ rawbits = length * 1152 / 10000; ++ if (duty_cycle > 50) ++ chunk = 3; ++ else ++ chunk = 1; ++ for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) { ++ shifted = chunk << (i * 3); ++ shifted >>= 1; ++ output &= (~shifted); ++ i++; ++ if (i == 3) { ++ soutp(UART_TX, output); ++ while (!(sinp(UART_LSR) & UART_LSR_THRE)) ++ ; ++ output = 0x7f; ++ i = 0; ++ } ++ } ++ if (i != 0) { ++ soutp(UART_TX, output); ++ while (!(sinp(UART_LSR) & UART_LSR_TEMT)) ++ ; ++ } ++ ++ if (i == 0) ++ ret = (-rawbits) * 10000 / 1152; ++ else ++ ret = (3 - i) * 3 * 10000 / 1152 + (-rawbits) * 10000 / 1152; ++ ++ return ret; ++} ++ ++#ifdef USE_RDTSC ++/* Version that uses Pentium rdtsc instruction to measure clocks */ ++ ++/* ++ * This version does sub-microsecond timing using rdtsc instruction, ++ * and does away with the fudged LIRC_SERIAL_TRANSMITTER_LATENCY ++ * Implicitly i586 architecture... 
- Steve ++ */ ++ ++static long send_pulse_homebrew_softcarrier(unsigned long length) ++{ ++ int flag; ++ unsigned long target, start, now; ++ ++ /* Get going quick as we can */ ++ rdtscl(start); ++ on(); ++ /* Convert length from microseconds to clocks */ ++ length *= conv_us_to_clocks; ++ /* And loop till time is up - flipping at right intervals */ ++ now = start; ++ target = pulse_width; ++ flag = 1; ++ /* ++ * FIXME: This looks like a hard busy wait, without even an occasional, ++ * polite, cpu_relax() call. There's got to be a better way? ++ * ++ * The i2c code has the result of a lot of bit-banging work, I wonder if ++ * there's something there which could be helpful here. ++ */ ++ while ((now - start) < length) { ++ /* Delay till flip time */ ++ do { ++ rdtscl(now); ++ } while ((now - start) < target); ++ ++ /* flip */ ++ if (flag) { ++ rdtscl(now); ++ off(); ++ target += space_width; ++ } else { ++ rdtscl(now); on(); ++ target += pulse_width; ++ } ++ flag = !flag; ++ } ++ rdtscl(now); ++ return ((now - start) - length) / conv_us_to_clocks; ++} ++#else /* ! USE_RDTSC */ ++/* Version using udelay() */ ++ ++/* ++ * here we use fixed point arithmetic, with 8 ++ * fractional bits. 
that gets us within 0.1% or so of the right average ++ * frequency, albeit with some jitter in pulse length - Steve ++ */ ++ ++/* To match 8 fractional bits used for pulse/space length */ ++ ++static long send_pulse_homebrew_softcarrier(unsigned long length) ++{ ++ int flag; ++ unsigned long actual, target, d; ++ length <<= 8; ++ ++ actual = 0; target = 0; flag = 0; ++ while (actual < length) { ++ if (flag) { ++ off(); ++ target += space_width; ++ } else { ++ on(); ++ target += pulse_width; ++ } ++ d = (target - actual - ++ LIRC_SERIAL_TRANSMITTER_LATENCY + 128) >> 8; ++ /* ++ * Note - we've checked in ioctl that the pulse/space ++ * widths are big enough so that d is > 0 ++ */ ++ udelay(d); ++ actual += (d << 8) + LIRC_SERIAL_TRANSMITTER_LATENCY; ++ flag = !flag; ++ } ++ return (actual-length) >> 8; ++} ++#endif /* USE_RDTSC */ ++ ++static long send_pulse_homebrew(unsigned long length) ++{ ++ if (length <= 0) ++ return 0; ++ ++ if (softcarrier) ++ return send_pulse_homebrew_softcarrier(length); ++ else { ++ on(); ++ safe_udelay(length); ++ return 0; ++ } ++} ++ ++static void send_space_irdeo(long length) ++{ ++ if (length <= 0) ++ return; ++ ++ safe_udelay(length); ++} ++ ++static void send_space_homebrew(long length) ++{ ++ off(); ++ if (length <= 0) ++ return; ++ safe_udelay(length); ++} ++ ++static void rbwrite(int l) ++{ ++ if (lirc_buffer_full(&rbuf)) { ++ /* no new signals will be accepted */ ++ dprintk("Buffer overrun\n"); ++ return; ++ } ++ lirc_buffer_write(&rbuf, (void *)&l); ++} ++ ++static void frbwrite(int l) ++{ ++ /* simple noise filter */ ++ static int pulse, space; ++ static unsigned int ptr; ++ ++ if (ptr > 0 && (l & PULSE_BIT)) { ++ pulse += l & PULSE_MASK; ++ if (pulse > 250) { ++ rbwrite(space); ++ rbwrite(pulse | PULSE_BIT); ++ ptr = 0; ++ pulse = 0; ++ } ++ return; ++ } ++ if (!(l & PULSE_BIT)) { ++ if (ptr == 0) { ++ if (l > 20000) { ++ space = l; ++ ptr++; ++ return; ++ } ++ } else { ++ if (l > 20000) { ++ space += pulse; ++ if (space > 
PULSE_MASK) ++ space = PULSE_MASK; ++ space += l; ++ if (space > PULSE_MASK) ++ space = PULSE_MASK; ++ pulse = 0; ++ return; ++ } ++ rbwrite(space); ++ rbwrite(pulse | PULSE_BIT); ++ ptr = 0; ++ pulse = 0; ++ } ++ } ++ rbwrite(l); ++} ++ ++static irqreturn_t irq_handler(int i, void *blah) ++{ ++ struct timeval tv; ++ int counter, dcd; ++ u8 status; ++ long deltv; ++ int data; ++ static int last_dcd = -1; ++ ++ if ((sinp(UART_IIR) & UART_IIR_NO_INT)) { ++ /* not our interrupt */ ++ return IRQ_NONE; ++ } ++ ++ counter = 0; ++ do { ++ counter++; ++ status = sinp(UART_MSR); ++ if (counter > RS_ISR_PASS_LIMIT) { ++ printk(KERN_WARNING LIRC_DRIVER_NAME ": AIEEEE: " ++ "We're caught!\n"); ++ break; ++ } ++ if ((status & hardware[type].signal_pin_change) ++ && sense != -1) { ++ /* get current time */ ++ do_gettimeofday(&tv); ++ ++ /* New mode, written by Trent Piepho ++ . */ ++ ++ /* ++ * The old format was not very portable. ++ * We now use an int to pass pulses ++ * and spaces to user space. ++ * ++ * If PULSE_BIT is set a pulse has been ++ * received, otherwise a space has been ++ * received. The driver needs to know if your ++ * receiver is active high or active low, or ++ * the space/pulse sense could be ++ * inverted. The bits denoted by PULSE_MASK are ++ * the length in microseconds. Lengths greater ++ * than or equal to 16 seconds are clamped to ++ * PULSE_MASK. All other bits are unused. ++ * This is a much simpler interface for user ++ * programs, as well as eliminating "out of ++ * phase" errors with space/pulse ++ * autodetection. ++ */ ++ ++ /* calc time since last interrupt in microseconds */ ++ dcd = (status & hardware[type].signal_pin) ? 
1 : 0; ++ ++ if (dcd == last_dcd) { ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": ignoring spike: %d %d %lx %lx %lx %lx\n", ++ dcd, sense, ++ tv.tv_sec, lasttv.tv_sec, ++ tv.tv_usec, lasttv.tv_usec); ++ continue; ++ } ++ ++ deltv = tv.tv_sec-lasttv.tv_sec; ++ if (tv.tv_sec < lasttv.tv_sec || ++ (tv.tv_sec == lasttv.tv_sec && ++ tv.tv_usec < lasttv.tv_usec)) { ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": AIEEEE: your clock just jumped " ++ "backwards\n"); ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": %d %d %lx %lx %lx %lx\n", ++ dcd, sense, ++ tv.tv_sec, lasttv.tv_sec, ++ tv.tv_usec, lasttv.tv_usec); ++ data = PULSE_MASK; ++ } else if (deltv > 15) { ++ data = PULSE_MASK; /* really long time */ ++ if (!(dcd^sense)) { ++ /* sanity check */ ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": AIEEEE: " ++ "%d %d %lx %lx %lx %lx\n", ++ dcd, sense, ++ tv.tv_sec, lasttv.tv_sec, ++ tv.tv_usec, lasttv.tv_usec); ++ /* ++ * detecting pulse while this ++ * MUST be a space! ++ */ ++ sense = sense ? 0 : 1; ++ } ++ } else ++ data = (int) (deltv*1000000 + ++ tv.tv_usec - ++ lasttv.tv_usec); ++ frbwrite(dcd^sense ? data : (data|PULSE_BIT)); ++ lasttv = tv; ++ last_dcd = dcd; ++ wake_up_interruptible(&rbuf.wait_poll); ++ } ++ } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? 
*/ ++ return IRQ_HANDLED; ++} ++ ++ ++static int hardware_init_port(void) ++{ ++ u8 scratch, scratch2, scratch3; ++ ++ /* ++ * This is a simple port existence test, borrowed from the autoconfig ++ * function in drivers/serial/8250.c ++ */ ++ scratch = sinp(UART_IER); ++ soutp(UART_IER, 0); ++#ifdef __i386__ ++ outb(0xff, 0x080); ++#endif ++ scratch2 = sinp(UART_IER) & 0x0f; ++ soutp(UART_IER, 0x0f); ++#ifdef __i386__ ++ outb(0x00, 0x080); ++#endif ++ scratch3 = sinp(UART_IER) & 0x0f; ++ soutp(UART_IER, scratch); ++ if (scratch2 != 0 || scratch3 != 0x0f) { ++ /* we fail, there's nothing here */ ++ printk(KERN_ERR LIRC_DRIVER_NAME ": port existence test " ++ "failed, cannot continue\n"); ++ return -EINVAL; ++ } ++ ++ ++ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* First of all, disable all interrupts */ ++ soutp(UART_IER, sinp(UART_IER) & ++ (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); ++ ++ /* Clear registers. */ ++ sinp(UART_LSR); ++ sinp(UART_RX); ++ sinp(UART_IIR); ++ sinp(UART_MSR); ++ ++#ifdef CONFIG_LIRC_SERIAL_NSLU2 ++ if (type == LIRC_NSLU2) { ++ /* Setup NSLU2 UART */ ++ ++ /* Enable UART */ ++ soutp(UART_IER, sinp(UART_IER) | UART_IE_IXP42X_UUE); ++ /* Disable Receiver data Time out interrupt */ ++ soutp(UART_IER, sinp(UART_IER) & ~UART_IE_IXP42X_RTOIE); ++ /* set out2 = interrupt unmask; off() doesn't set MCR ++ on NSLU2 */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2); ++ } ++#endif ++ ++ /* Set line for power source */ ++ off(); ++ ++ /* Clear registers again to be sure. */ ++ sinp(UART_LSR); ++ sinp(UART_RX); ++ sinp(UART_IIR); ++ sinp(UART_MSR); ++ ++ switch (type) { ++ case LIRC_IRDEO: ++ case LIRC_IRDEO_REMOTE: ++ /* setup port to 7N1 @ 115200 Baud */ ++ /* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */ ++ ++ /* Set DLAB 1. 
*/ ++ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); ++ /* Set divisor to 1 => 115200 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 1); ++ /* Set DLAB 0 + 7N1 */ ++ soutp(UART_LCR, UART_LCR_WLEN7); ++ /* THR interrupt already disabled at this point */ ++ break; ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++static int init_port(void) ++{ ++ int i, nlow, nhigh; ++ ++ /* Reserve io region. */ ++ /* ++ * Future MMAP-Developers: Attention! ++ * For memory mapped I/O you *might* need to use ioremap() first, ++ * for the NSLU2 it's done in boot code. ++ */ ++ if (((iommap != 0) ++ && (request_mem_region(iommap, 8 << ioshift, ++ LIRC_DRIVER_NAME) == NULL)) ++ || ((iommap == 0) ++ && (request_region(io, 8, LIRC_DRIVER_NAME) == NULL))) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": port %04x already in use\n", io); ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": use 'setserial /dev/ttySX uart none'\n"); ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": or compile the serial port driver as module and\n"); ++ printk(KERN_WARNING LIRC_DRIVER_NAME ++ ": make sure this module is loaded first\n"); ++ return -EBUSY; ++ } ++ ++ if (hardware_init_port() < 0) ++ return -EINVAL; ++ ++ /* Initialize pulse/space widths */ ++ init_timing_params(duty_cycle, freq); ++ ++ /* If pin is high, then this must be an active low receiver. */ ++ if (sense == -1) { ++ /* wait 1/2 sec for the power supply */ ++ msleep(500); ++ ++ /* ++ * probe 9 times every 0.04s, collect "votes" for ++ * active high/low ++ */ ++ nlow = 0; ++ nhigh = 0; ++ for (i = 0; i < 9; i++) { ++ if (sinp(UART_MSR) & hardware[type].signal_pin) ++ nlow++; ++ else ++ nhigh++; ++ msleep(40); ++ } ++ sense = (nlow >= nhigh ? 1 : 0); ++ printk(KERN_INFO LIRC_DRIVER_NAME ": auto-detected active " ++ "%s receiver\n", sense ? "low" : "high"); ++ } else ++ printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active " ++ "%s receiver\n", sense ? 
"low" : "high"); ++ ++ return 0; ++} ++ ++static int set_use_inc(void *data) ++{ ++ int result; ++ unsigned long flags; ++ ++ /* initialize timestamp */ ++ do_gettimeofday(&lasttv); ++ ++ result = request_irq(irq, irq_handler, ++ IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0), ++ LIRC_DRIVER_NAME, (void *)&hardware); ++ ++ switch (result) { ++ case -EBUSY: ++ printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq); ++ return -EBUSY; ++ case -EINVAL: ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": Bad irq number or handler\n"); ++ return -EINVAL; ++ default: ++ dprintk("Interrupt %d, port %04x obtained\n", irq, io); ++ break; ++ }; ++ ++ spin_lock_irqsave(&hardware[type].lock, flags); ++ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI); ++ ++ spin_unlock_irqrestore(&hardware[type].lock, flags); ++ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware[type].lock, flags); ++ ++ /* Set DLAB 0. 
*/ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* First of all, disable all interrupts */ ++ soutp(UART_IER, sinp(UART_IER) & ++ (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); ++ spin_unlock_irqrestore(&hardware[type].lock, flags); ++ ++ free_irq(irq, (void *)&hardware); ++ ++ dprintk("freed IRQ %d\n", irq); ++} ++ ++static ssize_t lirc_write(struct file *file, const char *buf, ++ size_t n, loff_t *ppos) ++{ ++ int i, count; ++ unsigned long flags; ++ long delta = 0; ++ ++ if (!(hardware[type].features&LIRC_CAN_SEND_PULSE)) ++ return -EBADF; ++ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ count = n / sizeof(int); ++ if (count > WBUF_LEN || count % 2 == 0) ++ return -EINVAL; ++ if (copy_from_user(wbuf, buf, n)) ++ return -EFAULT; ++ spin_lock_irqsave(&hardware[type].lock, flags); ++ if (type == LIRC_IRDEO) { ++ /* DTR, RTS down */ ++ on(); ++ } ++ for (i = 0; i < count; i++) { ++ if (i%2) ++ hardware[type].send_space(wbuf[i]-delta); ++ else ++ delta = hardware[type].send_pulse(wbuf[i]); ++ } ++ off(); ++ spin_unlock_irqrestore(&hardware[type].lock, flags); ++ return n; ++} ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg) ++{ ++ int result; ++ unsigned long value; ++ unsigned int ivalue; ++ ++ switch (cmd) { ++ case LIRC_GET_SEND_MODE: ++ if (!(hardware[type].features&LIRC_CAN_SEND_MASK)) ++ return -ENOIOCTLCMD; ++ ++ result = put_user(LIRC_SEND2MODE ++ (hardware[type].features&LIRC_CAN_SEND_MASK), ++ (unsigned long *) arg); ++ if (result) ++ return result; ++ break; ++ ++ case LIRC_SET_SEND_MODE: ++ if (!(hardware[type].features&LIRC_CAN_SEND_MASK)) ++ return -ENOIOCTLCMD; ++ ++ result = get_user(value, (unsigned long *) arg); ++ if (result) ++ return result; ++ /* only LIRC_MODE_PULSE supported */ ++ if (value != LIRC_MODE_PULSE) ++ return -ENOSYS; ++ break; ++ ++ case LIRC_GET_LENGTH: ++ return -ENOSYS; ++ break; ++ ++ case LIRC_SET_SEND_DUTY_CYCLE: ++ 
dprintk("SET_SEND_DUTY_CYCLE\n"); ++ if (!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE)) ++ return -ENOIOCTLCMD; ++ ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ if (ivalue <= 0 || ivalue > 100) ++ return -EINVAL; ++ return init_timing_params(ivalue, freq); ++ break; ++ ++ case LIRC_SET_SEND_CARRIER: ++ dprintk("SET_SEND_CARRIER\n"); ++ if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER)) ++ return -ENOIOCTLCMD; ++ ++ result = get_user(ivalue, (unsigned int *) arg); ++ if (result) ++ return result; ++ if (ivalue > 500000 || ivalue < 20000) ++ return -EINVAL; ++ return init_timing_params(duty_cycle, ivalue); ++ break; ++ ++ default: ++ return lirc_dev_fop_ioctl(node, filep, cmd, arg); ++ } ++ return 0; ++} ++ ++static struct file_operations lirc_fops = { ++ .owner = THIS_MODULE, ++ .write = lirc_write, ++ .ioctl = lirc_ioctl, ++ .read = lirc_dev_fop_read, ++ .poll = lirc_dev_fop_poll, ++ .open = lirc_dev_fop_open, ++ .release = lirc_dev_fop_close, ++}; ++ ++static struct lirc_driver driver = { ++ .name = LIRC_DRIVER_NAME, ++ .minor = -1, ++ .code_length = 1, ++ .sample_rate = 0, ++ .data = NULL, ++ .add_to_buf = NULL, ++ .rbuf = &rbuf, ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .fops = &lirc_fops, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++static struct platform_device *lirc_serial_dev; ++ ++static int __devinit lirc_serial_probe(struct platform_device *dev) ++{ ++ return 0; ++} ++ ++static int __devexit lirc_serial_remove(struct platform_device *dev) ++{ ++ return 0; ++} ++ ++static int lirc_serial_suspend(struct platform_device *dev, ++ pm_message_t state) ++{ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* Disable all interrupts */ ++ soutp(UART_IER, sinp(UART_IER) & ++ (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); ++ ++ /* Clear registers. 
*/ ++ sinp(UART_LSR); ++ sinp(UART_RX); ++ sinp(UART_IIR); ++ sinp(UART_MSR); ++ ++ return 0; ++} ++ ++/* twisty maze... need a forward-declaration here... */ ++static void lirc_serial_exit(void); ++ ++static int lirc_serial_resume(struct platform_device *dev) ++{ ++ unsigned long flags; ++ ++ if (hardware_init_port() < 0) { ++ lirc_serial_exit(); ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(&hardware[type].lock, flags); ++ /* Enable Interrupt */ ++ do_gettimeofday(&lasttv); ++ soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI); ++ off(); ++ ++ lirc_buffer_clear(&rbuf); ++ ++ spin_unlock_irqrestore(&hardware[type].lock, flags); ++ ++ return 0; ++} ++ ++static struct platform_driver lirc_serial_driver = { ++ .probe = lirc_serial_probe, ++ .remove = __devexit_p(lirc_serial_remove), ++ .suspend = lirc_serial_suspend, ++ .resume = lirc_serial_resume, ++ .driver = { ++ .name = "lirc_serial", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init lirc_serial_init(void) ++{ ++ int result; ++ ++ /* Init read buffer. 
*/ ++ result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN); ++ if (result < 0) ++ return -ENOMEM; ++ ++ result = platform_driver_register(&lirc_serial_driver); ++ if (result) { ++ printk("lirc register returned %d\n", result); ++ goto exit_buffer_free; ++ } ++ ++ lirc_serial_dev = platform_device_alloc("lirc_serial", 0); ++ if (!lirc_serial_dev) { ++ result = -ENOMEM; ++ goto exit_driver_unregister; ++ } ++ ++ result = platform_device_add(lirc_serial_dev); ++ if (result) ++ goto exit_device_put; ++ ++ return 0; ++ ++exit_device_put: ++ platform_device_put(lirc_serial_dev); ++exit_driver_unregister: ++ platform_driver_unregister(&lirc_serial_driver); ++exit_buffer_free: ++ lirc_buffer_free(&rbuf); ++ return result; ++} ++ ++static void lirc_serial_exit(void) ++{ ++ platform_device_unregister(lirc_serial_dev); ++ platform_driver_unregister(&lirc_serial_driver); ++ lirc_buffer_free(&rbuf); ++} ++ ++static int __init lirc_serial_init_module(void) ++{ ++ int result; ++ ++ result = lirc_serial_init(); ++ if (result) ++ return result; ++ ++ switch (type) { ++ case LIRC_HOMEBREW: ++ case LIRC_IRDEO: ++ case LIRC_IRDEO_REMOTE: ++ case LIRC_ANIMAX: ++ case LIRC_IGOR: ++ /* if nothing specified, use ttyS0/com1 and irq 4 */ ++ io = io ? io : 0x3f8; ++ irq = irq ? irq : 4; ++ break; ++#ifdef CONFIG_LIRC_SERIAL_NSLU2 ++ case LIRC_NSLU2: ++ io = io ? io : IRQ_IXP4XX_UART2; ++ irq = irq ? irq : (IXP4XX_UART2_BASE_VIRT + REG_OFFSET); ++ iommap = iommap ? iommap : IXP4XX_UART2_BASE_PHYS; ++ ioshift = ioshift ? 
ioshift : 2; ++ break; ++#endif ++ default: ++ result = -EINVAL; ++ goto exit_serial_exit; ++ } ++ if (!softcarrier) { ++ switch (type) { ++ case LIRC_HOMEBREW: ++ case LIRC_IGOR: ++#ifdef CONFIG_LIRC_SERIAL_NSLU2 ++ case LIRC_NSLU2: ++#endif ++ hardware[type].features &= ++ ~(LIRC_CAN_SET_SEND_DUTY_CYCLE| ++ LIRC_CAN_SET_SEND_CARRIER); ++ break; ++ } ++ } ++ ++ result = init_port(); ++ if (result < 0) ++ goto exit_serial_exit; ++ driver.features = hardware[type].features; ++ driver.dev = &lirc_serial_dev->dev; ++ driver.minor = lirc_register_driver(&driver); ++ if (driver.minor < 0) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": register_chrdev failed!\n"); ++ result = -EIO; ++ goto exit_release; ++ } ++ return 0; ++exit_release: ++ release_region(io, 8); ++exit_serial_exit: ++ lirc_serial_exit(); ++ return result; ++} ++ ++static void __exit lirc_serial_exit_module(void) ++{ ++ lirc_serial_exit(); ++ if (iommap != 0) ++ release_mem_region(iommap, 8 << ioshift); ++ else ++ release_region(io, 8); ++ lirc_unregister_driver(driver.minor); ++ dprintk("cleaned up module\n"); ++} ++ ++ ++module_init(lirc_serial_init_module); ++module_exit(lirc_serial_exit_module); ++ ++MODULE_DESCRIPTION("Infra-red receiver driver for serial ports."); ++MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, " ++ "Christoph Bartelmus, Andrei Tanas"); ++MODULE_LICENSE("GPL"); ++ ++module_param(type, int, S_IRUGO); ++MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo," ++ " 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug," ++ " 5 = NSLU2 RX:CTS2/TX:GreenLED)"); ++ ++module_param(io, int, S_IRUGO); ++MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)"); ++ ++/* some architectures (e.g. intel xscale) have memory mapped registers */ ++module_param(iommap, bool, S_IRUGO); ++MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O" ++ " (0 = no memory mapped io)"); ++ ++/* ++ * some architectures (e.g. intel xscale) align the 8bit serial registers ++ * on 32bit word boundaries. 
++ * See linux-kernel/serial/8250.c serial_in()/out() ++ */ ++module_param(ioshift, int, S_IRUGO); ++MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)"); ++ ++module_param(irq, int, S_IRUGO); ++MODULE_PARM_DESC(irq, "Interrupt (4 or 3)"); ++ ++module_param(share_irq, bool, S_IRUGO); ++MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)"); ++ ++module_param(sense, bool, S_IRUGO); ++MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit" ++ " (0 = active high, 1 = active low )"); ++ ++#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER ++module_param(txsense, bool, S_IRUGO); ++MODULE_PARM_DESC(txsense, "Sense of transmitter circuit" ++ " (0 = active high, 1 = active low )"); ++#endif ++ ++module_param(softcarrier, bool, S_IRUGO); ++MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); +diff --git a/drivers/input/lirc/lirc_sir.c b/drivers/input/lirc/lirc_sir.c +new file mode 100644 +index 0000000..4a471d6 +--- /dev/null ++++ b/drivers/input/lirc/lirc_sir.c +@@ -0,0 +1,1283 @@ ++/* ++ * LIRC SIR driver, (C) 2000 Milan Pikula ++ * ++ * lirc_sir - Device driver for use with SIR (serial infra red) ++ * mode of IrDA on many notebooks. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * ++ * 2000/09/16 Frank Przybylski : ++ * added timeout and relaxed pulse detection, removed gap bug ++ * ++ * 2000/12/15 Christoph Bartelmus : ++ * added support for Tekram Irmate 210 (sending does not work yet, ++ * kind of disappointing that nobody was able to implement that ++ * before), ++ * major clean-up ++ * ++ * 2001/02/27 Christoph Bartelmus : ++ * added support for StrongARM SA1100 embedded microprocessor ++ * parts cut'n'pasted from sa1100_ir.c (C) 2000 Russell King ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef LIRC_ON_SA1100 ++#include ++#ifdef CONFIG_SA1100_COLLIE ++#include ++#include ++#endif ++#endif ++ ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++/* SECTION: Definitions */ ++ ++/*** Tekram dongle ***/ ++#ifdef LIRC_SIR_TEKRAM ++/* stolen from kernel source */ ++/* definitions for Tekram dongle */ ++#define TEKRAM_115200 0x00 ++#define TEKRAM_57600 0x01 ++#define TEKRAM_38400 0x02 ++#define TEKRAM_19200 0x03 ++#define TEKRAM_9600 0x04 ++#define TEKRAM_2400 0x08 ++ ++#define TEKRAM_PW 0x10 /* Pulse select bit */ ++ ++/* 10bit * 1s/115200bit in milliseconds = 87ms*/ ++#define TIME_CONST (10000000ul/115200ul) ++ ++#endif ++ ++#ifdef LIRC_SIR_ACTISYS_ACT200L ++static void init_act200(void); ++#elif defined(LIRC_SIR_ACTISYS_ACT220L) ++static void init_act220(void); ++#endif ++ ++/*** SA1100 ***/ ++#ifdef LIRC_ON_SA1100 ++struct sa1100_ser2_registers { ++ /* HSSP control register */ ++ unsigned char hscr0; ++ /* UART registers */ ++ unsigned char utcr0; ++ unsigned char utcr1; ++ unsigned char utcr2; ++ unsigned char utcr3; ++ unsigned 
char utcr4; ++ unsigned char utdr; ++ unsigned char utsr0; ++ unsigned char utsr1; ++} sr; ++ ++static int irq = IRQ_Ser2ICP; ++ ++#define LIRC_ON_SA1100_TRANSMITTER_LATENCY 0 ++ ++/* pulse/space ratio of 50/50 */ ++static unsigned long pulse_width = (13-LIRC_ON_SA1100_TRANSMITTER_LATENCY); ++/* 1000000/freq-pulse_width */ ++static unsigned long space_width = (13-LIRC_ON_SA1100_TRANSMITTER_LATENCY); ++static unsigned int freq = 38000; /* modulation frequency */ ++static unsigned int duty_cycle = 50; /* duty cycle of 50% */ ++ ++#endif ++ ++#define RBUF_LEN 1024 ++#define WBUF_LEN 1024 ++ ++#define LIRC_DRIVER_NAME "lirc_sir" ++ ++#define PULSE '[' ++ ++#ifndef LIRC_SIR_TEKRAM ++/* 9bit * 1s/115200bit in milli seconds = 78.125ms*/ ++#define TIME_CONST (9000000ul/115200ul) ++#endif ++ ++ ++/* timeout for sequences in jiffies (=5/100s), must be longer than TIME_CONST */ ++#define SIR_TIMEOUT (HZ*5/100) ++ ++#ifndef LIRC_ON_SA1100 ++#ifndef LIRC_IRQ ++#define LIRC_IRQ 4 ++#endif ++#ifndef LIRC_PORT ++/* for external dongles, default to com1 */ ++#if defined(LIRC_SIR_ACTISYS_ACT200L) || \ ++ defined(LIRC_SIR_ACTISYS_ACT220L) || \ ++ defined(LIRC_SIR_TEKRAM) ++#define LIRC_PORT 0x3f8 ++#else ++/* onboard sir ports are typically com3 */ ++#define LIRC_PORT 0x3e8 ++#endif ++#endif ++ ++static int io = LIRC_PORT; ++static int irq = LIRC_IRQ; ++static int threshold = 3; ++#endif ++ ++static DEFINE_SPINLOCK(timer_lock); ++static struct timer_list timerlist; ++/* time of last signal change detected */ ++static struct timeval last_tv = {0, 0}; ++/* time of last UART data ready interrupt */ ++static struct timeval last_intr_tv = {0, 0}; ++static int last_value; ++ ++static DECLARE_WAIT_QUEUE_HEAD(lirc_read_queue); ++ ++static DEFINE_SPINLOCK(hardware_lock); ++ ++static int rx_buf[RBUF_LEN]; ++static unsigned int rx_tail, rx_head; ++static int tx_buf[WBUF_LEN]; ++ ++static int debug; ++#define dprintk(fmt, args...) 
\ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \ ++ fmt, ## args); \ ++ } while (0) ++ ++/* SECTION: Prototypes */ ++ ++/* Communication with user-space */ ++static unsigned int lirc_poll(struct file *file, poll_table *wait); ++static ssize_t lirc_read(struct file *file, char *buf, size_t count, ++ loff_t *ppos); ++static ssize_t lirc_write(struct file *file, const char *buf, size_t n, ++ loff_t *pos); ++static int lirc_ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg); ++static void add_read_queue(int flag, unsigned long val); ++static int init_chrdev(void); ++static void drop_chrdev(void); ++/* Hardware */ ++static irqreturn_t sir_interrupt(int irq, void *dev_id); ++static void send_space(unsigned long len); ++static void send_pulse(unsigned long len); ++static int init_hardware(void); ++static void drop_hardware(void); ++/* Initialisation */ ++static int init_port(void); ++static void drop_port(void); ++ ++#ifdef LIRC_ON_SA1100 ++static void on(void) ++{ ++ PPSR |= PPC_TXD2; ++} ++ ++static void off(void) ++{ ++ PPSR &= ~PPC_TXD2; ++} ++#else ++static inline unsigned int sinp(int offset) ++{ ++ return inb(io + offset); ++} ++ ++static inline void soutp(int offset, int value) ++{ ++ outb(value, io + offset); ++} ++#endif ++ ++#ifndef MAX_UDELAY_MS ++#define MAX_UDELAY_US 5000 ++#else ++#define MAX_UDELAY_US (MAX_UDELAY_MS*1000) ++#endif ++ ++static void safe_udelay(unsigned long usecs) ++{ ++ while (usecs > MAX_UDELAY_US) { ++ udelay(MAX_UDELAY_US); ++ usecs -= MAX_UDELAY_US; ++ } ++ udelay(usecs); ++} ++ ++/* SECTION: Communication with user-space */ ++ ++static unsigned int lirc_poll(struct file *file, poll_table *wait) ++{ ++ poll_wait(file, &lirc_read_queue, wait); ++ if (rx_head != rx_tail) ++ return POLLIN | POLLRDNORM; ++ return 0; ++} ++ ++static ssize_t lirc_read(struct file *file, char *buf, size_t count, ++ loff_t *ppos) ++{ ++ int n = 0; ++ int retval = 0; ++ DECLARE_WAITQUEUE(wait, current); 
++ ++ if (count % sizeof(int)) ++ return -EINVAL; ++ ++ add_wait_queue(&lirc_read_queue, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (n < count) { ++ if (rx_head != rx_tail) { ++ if (copy_to_user((void *) buf + n, ++ (void *) (rx_buf + rx_head), ++ sizeof(int))) { ++ retval = -EFAULT; ++ break; ++ } ++ rx_head = (rx_head + 1) & (RBUF_LEN - 1); ++ n += sizeof(int); ++ } else { ++ if (file->f_flags & O_NONBLOCK) { ++ retval = -EAGAIN; ++ break; ++ } ++ if (signal_pending(current)) { ++ retval = -ERESTARTSYS; ++ break; ++ } ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ } ++ remove_wait_queue(&lirc_read_queue, &wait); ++ set_current_state(TASK_RUNNING); ++ return n ? n : retval; ++} ++static ssize_t lirc_write(struct file *file, const char *buf, size_t n, ++ loff_t *pos) ++{ ++ unsigned long flags; ++ int i; ++ ++ if (n % sizeof(int) || (n / sizeof(int)) > WBUF_LEN) ++ return -EINVAL; ++ if (copy_from_user(tx_buf, buf, n)) ++ return -EFAULT; ++ i = 0; ++ n /= sizeof(int); ++#ifdef LIRC_ON_SA1100 ++ /* disable receiver */ ++ Ser2UTCR3 = 0; ++#endif ++ local_irq_save(flags); ++ while (1) { ++ if (i >= n) ++ break; ++ if (tx_buf[i]) ++ send_pulse(tx_buf[i]); ++ i++; ++ if (i >= n) ++ break; ++ if (tx_buf[i]) ++ send_space(tx_buf[i]); ++ i++; ++ } ++ local_irq_restore(flags); ++#ifdef LIRC_ON_SA1100 ++ off(); ++ udelay(1000); /* wait 1ms for IR diode to recover */ ++ Ser2UTCR3 = 0; ++ /* clear status register to prevent unwanted interrupts */ ++ Ser2UTSR0 &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB); ++ /* enable receiver */ ++ Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE; ++#endif ++ return n; ++} ++ ++static int lirc_ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg) ++{ ++ int retval = 0; ++ unsigned long value = 0; ++#ifdef LIRC_ON_SA1100 ++ unsigned int ivalue; ++ ++ if (cmd == LIRC_GET_FEATURES) ++ value = LIRC_CAN_SEND_PULSE | ++ LIRC_CAN_SET_SEND_DUTY_CYCLE | ++ LIRC_CAN_SET_SEND_CARRIER | ++ LIRC_CAN_REC_MODE2; ++ 
else if (cmd == LIRC_GET_SEND_MODE) ++ value = LIRC_MODE_PULSE; ++ else if (cmd == LIRC_GET_REC_MODE) ++ value = LIRC_MODE_MODE2; ++#else ++ if (cmd == LIRC_GET_FEATURES) ++ value = LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2; ++ else if (cmd == LIRC_GET_SEND_MODE) ++ value = LIRC_MODE_PULSE; ++ else if (cmd == LIRC_GET_REC_MODE) ++ value = LIRC_MODE_MODE2; ++#endif ++ ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ case LIRC_GET_SEND_MODE: ++ case LIRC_GET_REC_MODE: ++ retval = put_user(value, (unsigned long *) arg); ++ break; ++ ++ case LIRC_SET_SEND_MODE: ++ case LIRC_SET_REC_MODE: ++ retval = get_user(value, (unsigned long *) arg); ++ break; ++#ifdef LIRC_ON_SA1100 ++ case LIRC_SET_SEND_DUTY_CYCLE: ++ retval = get_user(ivalue, (unsigned int *) arg); ++ if (retval) ++ return retval; ++ if (ivalue <= 0 || ivalue > 100) ++ return -EINVAL; ++ /* (ivalue/100)*(1000000/freq) */ ++ duty_cycle = ivalue; ++ pulse_width = (unsigned long) duty_cycle*10000/freq; ++ space_width = (unsigned long) 1000000L/freq-pulse_width; ++ if (pulse_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY) ++ pulse_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY; ++ if (space_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY) ++ space_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY; ++ break; ++ case LIRC_SET_SEND_CARRIER: ++ retval = get_user(ivalue, (unsigned int *) arg); ++ if (retval) ++ return retval; ++ if (ivalue > 500000 || ivalue < 20000) ++ return -EINVAL; ++ freq = ivalue; ++ pulse_width = (unsigned long) duty_cycle*10000/freq; ++ space_width = (unsigned long) 1000000L/freq-pulse_width; ++ if (pulse_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY) ++ pulse_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY; ++ if (space_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY) ++ space_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY; ++ break; ++#endif ++ default: ++ retval = -ENOIOCTLCMD; ++ ++ } ++ ++ if (retval) ++ return retval; ++ if (cmd == LIRC_SET_REC_MODE) { ++ if (value != LIRC_MODE_MODE2) ++ retval = -ENOSYS; ++ } else if 
(cmd == LIRC_SET_SEND_MODE) { ++ if (value != LIRC_MODE_PULSE) ++ retval = -ENOSYS; ++ } ++ ++ return retval; ++} ++ ++static void add_read_queue(int flag, unsigned long val) ++{ ++ unsigned int new_rx_tail; ++ int newval; ++ ++ dprintk("add flag %d with val %lu\n", flag, val); ++ ++ newval = val & PULSE_MASK; ++ ++ /* ++ * statistically, pulses are ~TIME_CONST/2 too long. we could ++ * maybe make this more exact, but this is good enough ++ */ ++ if (flag) { ++ /* pulse */ ++ if (newval > TIME_CONST/2) ++ newval -= TIME_CONST/2; ++ else /* should not ever happen */ ++ newval = 1; ++ newval |= PULSE_BIT; ++ } else { ++ newval += TIME_CONST/2; ++ } ++ new_rx_tail = (rx_tail + 1) & (RBUF_LEN - 1); ++ if (new_rx_tail == rx_head) { ++ dprintk("Buffer overrun.\n"); ++ return; ++ } ++ rx_buf[rx_tail] = newval; ++ rx_tail = new_rx_tail; ++ wake_up_interruptible(&lirc_read_queue); ++} ++ ++static struct file_operations lirc_fops = { ++ .owner = THIS_MODULE, ++ .read = lirc_read, ++ .write = lirc_write, ++ .poll = lirc_poll, ++ .ioctl = lirc_ioctl, ++ .open = lirc_dev_fop_open, ++ .release = lirc_dev_fop_close, ++}; ++ ++static int set_use_inc(void *data) ++{ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++} ++ ++static struct lirc_driver driver = { ++ .name = LIRC_DRIVER_NAME, ++ .minor = -1, ++ .code_length = 1, ++ .sample_rate = 0, ++ .data = NULL, ++ .add_to_buf = NULL, ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .fops = &lirc_fops, ++ .dev = NULL, ++ .owner = THIS_MODULE, ++}; ++ ++ ++static int init_chrdev(void) ++{ ++ driver.minor = lirc_register_driver(&driver); ++ if (driver.minor < 0) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++static void drop_chrdev(void) ++{ ++ lirc_unregister_driver(driver.minor); ++} ++ ++/* SECTION: Hardware */ ++static long delta(struct timeval *tv1, struct timeval *tv2) ++{ ++ unsigned long deltv; ++ ++ deltv = tv2->tv_sec - tv1->tv_sec; 
++ if (deltv > 15) ++ deltv = 0xFFFFFF; ++ else ++ deltv = deltv*1000000 + ++ tv2->tv_usec - ++ tv1->tv_usec; ++ return deltv; ++} ++ ++static void sir_timeout(unsigned long data) ++{ ++ /* ++ * if last received signal was a pulse, but receiving stopped ++ * within the 9 bit frame, we need to finish this pulse and ++ * simulate a signal change to from pulse to space. Otherwise ++ * upper layers will receive two sequences next time. ++ */ ++ ++ unsigned long flags; ++ unsigned long pulse_end; ++ ++ /* avoid interference with interrupt */ ++ spin_lock_irqsave(&timer_lock, flags); ++ if (last_value) { ++#ifndef LIRC_ON_SA1100 ++ /* clear unread bits in UART and restart */ ++ outb(UART_FCR_CLEAR_RCVR, io + UART_FCR); ++#endif ++ /* determine 'virtual' pulse end: */ ++ pulse_end = delta(&last_tv, &last_intr_tv); ++ dprintk("timeout add %d for %lu usec\n", last_value, pulse_end); ++ add_read_queue(last_value, pulse_end); ++ last_value = 0; ++ last_tv = last_intr_tv; ++ } ++ spin_unlock_irqrestore(&timer_lock, flags); ++} ++ ++static irqreturn_t sir_interrupt(int irq, void *dev_id) ++{ ++ unsigned char data; ++ struct timeval curr_tv; ++ static unsigned long deltv; ++#ifdef LIRC_ON_SA1100 ++ int status; ++ static int n; ++ ++ status = Ser2UTSR0; ++ /* ++ * Deal with any receive errors first. The bytes in error may be ++ * the only bytes in the receive FIFO, so we do this first. 
++ */ ++ while (status & UTSR0_EIF) { ++ int bstat; ++ ++ if (debug) { ++ dprintk("EIF\n"); ++ bstat = Ser2UTSR1; ++ ++ if (bstat & UTSR1_FRE) ++ dprintk("frame error\n"); ++ if (bstat & UTSR1_ROR) ++ dprintk("receive fifo overrun\n"); ++ if (bstat & UTSR1_PRE) ++ dprintk("parity error\n"); ++ } ++ ++ bstat = Ser2UTDR; ++ n++; ++ status = Ser2UTSR0; ++ } ++ ++ if (status & (UTSR0_RFS | UTSR0_RID)) { ++ do_gettimeofday(&curr_tv); ++ deltv = delta(&last_tv, &curr_tv); ++ do { ++ data = Ser2UTDR; ++ dprintk("%d data: %u\n", n, (unsigned int) data); ++ n++; ++ } while (status & UTSR0_RID && /* do not empty fifo in order to ++ * get UTSR0_RID in any case */ ++ Ser2UTSR1 & UTSR1_RNE); /* data ready */ ++ ++ if (status&UTSR0_RID) { ++ add_read_queue(0 , deltv - n * TIME_CONST); /*space*/ ++ add_read_queue(1, n * TIME_CONST); /*pulse*/ ++ n = 0; ++ last_tv = curr_tv; ++ } ++ } ++ ++ if (status & UTSR0_TFS) ++ printk(KERN_ERR "transmit fifo not full, shouldn't happen\n"); ++ ++ /* We must clear certain bits. 
*/ ++ status &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB); ++ if (status) ++ Ser2UTSR0 = status; ++#else ++ unsigned long deltintrtv; ++ unsigned long flags; ++ int iir, lsr; ++ ++ while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { ++ switch (iir&UART_IIR_ID) { /* FIXME toto treba preriedit */ ++ case UART_IIR_MSI: ++ (void) inb(io + UART_MSR); ++ break; ++ case UART_IIR_RLSI: ++ (void) inb(io + UART_LSR); ++ break; ++ case UART_IIR_THRI: ++#if 0 ++ if (lsr & UART_LSR_THRE) /* FIFO is empty */ ++ outb(data, io + UART_TX) ++#endif ++ break; ++ case UART_IIR_RDI: ++ /* avoid interference with timer */ ++ spin_lock_irqsave(&timer_lock, flags); ++ do { ++ del_timer(&timerlist); ++ data = inb(io + UART_RX); ++ do_gettimeofday(&curr_tv); ++ deltv = delta(&last_tv, &curr_tv); ++ deltintrtv = delta(&last_intr_tv, &curr_tv); ++ dprintk("t %lu, d %d\n", deltintrtv, (int)data); ++ /* ++ * if nothing came in last X cycles, ++ * it was gap ++ */ ++ if (deltintrtv > TIME_CONST * threshold) { ++ if (last_value) { ++ dprintk("GAP\n"); ++ /* simulate signal change */ ++ add_read_queue(last_value, ++ deltv - ++ deltintrtv); ++ last_value = 0; ++ last_tv.tv_sec = ++ last_intr_tv.tv_sec; ++ last_tv.tv_usec = ++ last_intr_tv.tv_usec; ++ deltv = deltintrtv; ++ } ++ } ++ data = 1; ++ if (data ^ last_value) { ++ /* ++ * deltintrtv > 2*TIME_CONST, remember? 
++ * the other case is timeout ++ */ ++ add_read_queue(last_value, ++ deltv-TIME_CONST); ++ last_value = data; ++ last_tv = curr_tv; ++ if (last_tv.tv_usec >= TIME_CONST) { ++ last_tv.tv_usec -= TIME_CONST; ++ } else { ++ last_tv.tv_sec--; ++ last_tv.tv_usec += 1000000 - ++ TIME_CONST; ++ } ++ } ++ last_intr_tv = curr_tv; ++ if (data) { ++ /* ++ * start timer for end of ++ * sequence detection ++ */ ++ timerlist.expires = jiffies + ++ SIR_TIMEOUT; ++ add_timer(&timerlist); ++ } ++ ++ lsr = inb(io + UART_LSR); ++ } while (lsr & UART_LSR_DR); /* data ready */ ++ spin_unlock_irqrestore(&timer_lock, flags); ++ break; ++ default: ++ break; ++ } ++ } ++#endif ++ return IRQ_RETVAL(IRQ_HANDLED); ++} ++ ++#ifdef LIRC_ON_SA1100 ++static void send_pulse(unsigned long length) ++{ ++ unsigned long k, delay; ++ int flag; ++ ++ if (length == 0) ++ return; ++ /* ++ * this won't give us the carrier frequency we really want ++ * due to integer arithmetic, but we can accept this inaccuracy ++ */ ++ ++ for (k = flag = 0; k < length; k += delay, flag = !flag) { ++ if (flag) { ++ off(); ++ delay = space_width; ++ } else { ++ on(); ++ delay = pulse_width; ++ } ++ safe_udelay(delay); ++ } ++ off(); ++} ++ ++static void send_space(unsigned long length) ++{ ++ if (length == 0) ++ return; ++ off(); ++ safe_udelay(length); ++} ++#else ++static void send_space(unsigned long len) ++{ ++ safe_udelay(len); ++} ++ ++static void send_pulse(unsigned long len) ++{ ++ long bytes_out = len / TIME_CONST; ++ long time_left; ++ ++ time_left = (long)len - (long)bytes_out * (long)TIME_CONST; ++ if (bytes_out == 0) { ++ bytes_out++; ++ time_left = 0; ++ } ++ while (bytes_out--) { ++ outb(PULSE, io + UART_TX); ++ /* FIXME treba seriozne cakanie z char/serial.c */ ++ while (!(inb(io + UART_LSR) & UART_LSR_THRE)) ++ ; ++ } ++#if 0 ++ if (time_left > 0) ++ safe_udelay(time_left); ++#endif ++} ++#endif ++ ++#ifdef CONFIG_SA1100_COLLIE ++static int sa1100_irda_set_power_collie(int state) ++{ ++ if (state) { ++ /* 
++ * 0 - off ++ * 1 - short range, lowest power ++ * 2 - medium range, medium power ++ * 3 - maximum range, high power ++ */ ++ ucb1200_set_io_direction(TC35143_GPIO_IR_ON, ++ TC35143_IODIR_OUTPUT); ++ ucb1200_set_io(TC35143_GPIO_IR_ON, TC35143_IODAT_LOW); ++ udelay(100); ++ } else { ++ /* OFF */ ++ ucb1200_set_io_direction(TC35143_GPIO_IR_ON, ++ TC35143_IODIR_OUTPUT); ++ ucb1200_set_io(TC35143_GPIO_IR_ON, TC35143_IODAT_HIGH); ++ } ++ return 0; ++} ++#endif ++ ++static int init_hardware(void) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ /* reset UART */ ++#ifdef LIRC_ON_SA1100 ++#ifdef CONFIG_SA1100_BITSY ++ if (machine_is_bitsy()) { ++ printk(KERN_INFO "Power on IR module\n"); ++ set_bitsy_egpio(EGPIO_BITSY_IR_ON); ++ } ++#endif ++#ifdef CONFIG_SA1100_COLLIE ++ sa1100_irda_set_power_collie(3); /* power on */ ++#endif ++ sr.hscr0 = Ser2HSCR0; ++ ++ sr.utcr0 = Ser2UTCR0; ++ sr.utcr1 = Ser2UTCR1; ++ sr.utcr2 = Ser2UTCR2; ++ sr.utcr3 = Ser2UTCR3; ++ sr.utcr4 = Ser2UTCR4; ++ ++ sr.utdr = Ser2UTDR; ++ sr.utsr0 = Ser2UTSR0; ++ sr.utsr1 = Ser2UTSR1; ++ ++ /* configure GPIO */ ++ /* output */ ++ PPDR |= PPC_TXD2; ++ PSDR |= PPC_TXD2; ++ /* set output to 0 */ ++ off(); ++ ++ /* Enable HP-SIR modulation, and ensure that the port is disabled. 
*/ ++ Ser2UTCR3 = 0; ++ Ser2HSCR0 = sr.hscr0 & (~HSCR0_HSSP); ++ ++ /* clear status register to prevent unwanted interrupts */ ++ Ser2UTSR0 &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB); ++ ++ /* 7N1 */ ++ Ser2UTCR0 = UTCR0_1StpBit|UTCR0_7BitData; ++ /* 115200 */ ++ Ser2UTCR1 = 0; ++ Ser2UTCR2 = 1; ++ /* use HPSIR, 1.6 usec pulses */ ++ Ser2UTCR4 = UTCR4_HPSIR|UTCR4_Z1_6us; ++ ++ /* enable receiver, receive fifo interrupt */ ++ Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE; ++ ++ /* clear status register to prevent unwanted interrupts */ ++ Ser2UTSR0 &= (UTSR0_RID | UTSR0_RBB | UTSR0_REB); ++ ++#elif defined(LIRC_SIR_TEKRAM) ++ /* disable FIFO */ ++ soutp(UART_FCR, ++ UART_FCR_CLEAR_RCVR| ++ UART_FCR_CLEAR_XMIT| ++ UART_FCR_TRIGGER_1); ++ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* First of all, disable all interrupts */ ++ soutp(UART_IER, sinp(UART_IER) & ++ (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI))); ++ ++ /* Set DLAB 1. */ ++ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); ++ ++ /* Set divisor to 12 => 9600 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 12); ++ ++ /* Set DLAB 0. */ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* power supply */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ safe_udelay(50*1000); ++ ++ /* -DTR low -> reset PIC */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2); ++ udelay(1*1000); ++ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(100); ++ ++ ++ /* -RTS low -> send control byte */ ++ soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(7); ++ soutp(UART_TX, TEKRAM_115200|TEKRAM_PW); ++ ++ /* one byte takes ~1042 usec to transmit at 9600,8N1 */ ++ udelay(1500); ++ ++ /* back to normal operation */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(50); ++ ++ udelay(1500); ++ ++ /* read previous control byte */ ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": 0x%02x\n", sinp(UART_RX)); ++ ++ /* Set DLAB 1. 
*/ ++ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); ++ ++ /* Set divisor to 1 => 115200 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 1); ++ ++ /* Set DLAB 0, 8 Bit */ ++ soutp(UART_LCR, UART_LCR_WLEN8); ++ /* enable interrupts */ ++ soutp(UART_IER, sinp(UART_IER)|UART_IER_RDI); ++#else ++ outb(0, io + UART_MCR); ++ outb(0, io + UART_IER); ++ /* init UART */ ++ /* set DLAB, speed = 115200 */ ++ outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR); ++ outb(1, io + UART_DLL); outb(0, io + UART_DLM); ++ /* 7N1+start = 9 bits at 115200 ~ 3 bits at 44000 */ ++ outb(UART_LCR_WLEN7, io + UART_LCR); ++ /* FIFO operation */ ++ outb(UART_FCR_ENABLE_FIFO, io + UART_FCR); ++ /* interrupts */ ++ /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */ ++ outb(UART_IER_RDI, io + UART_IER); ++ /* turn on UART */ ++ outb(UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2, io + UART_MCR); ++#ifdef LIRC_SIR_ACTISYS_ACT200L ++ init_act200(); ++#elif defined(LIRC_SIR_ACTISYS_ACT220L) ++ init_act220(); ++#endif ++#endif ++ spin_unlock_irqrestore(&hardware_lock, flags); ++ return 0; ++} ++ ++static void drop_hardware(void) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&hardware_lock, flags); ++ ++#ifdef LIRC_ON_SA1100 ++ Ser2UTCR3 = 0; ++ ++ Ser2UTCR0 = sr.utcr0; ++ Ser2UTCR1 = sr.utcr1; ++ Ser2UTCR2 = sr.utcr2; ++ Ser2UTCR4 = sr.utcr4; ++ Ser2UTCR3 = sr.utcr3; ++ ++ Ser2HSCR0 = sr.hscr0; ++#ifdef CONFIG_SA1100_BITSY ++ if (machine_is_bitsy()) ++ clr_bitsy_egpio(EGPIO_BITSY_IR_ON); ++#endif ++#ifdef CONFIG_SA1100_COLLIE ++ sa1100_irda_set_power_collie(0); /* power off */ ++#endif ++#else ++ /* turn off interrupts */ ++ outb(0, io + UART_IER); ++#endif ++ spin_unlock_irqrestore(&hardware_lock, flags); ++} ++ ++/* SECTION: Initialisation */ ++ ++static int init_port(void) ++{ ++ int retval; ++ ++ /* get I/O port access and IRQ line */ ++#ifndef LIRC_ON_SA1100 ++ if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) { ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": i/o port 0x%.4x 
already in use.\n", io); ++ return -EBUSY; ++ } ++#endif ++ retval = request_irq(irq, sir_interrupt, IRQF_DISABLED, ++ LIRC_DRIVER_NAME, NULL); ++ if (retval < 0) { ++# ifndef LIRC_ON_SA1100 ++ release_region(io, 8); ++# endif ++ printk(KERN_ERR LIRC_DRIVER_NAME ++ ": IRQ %d already in use.\n", ++ irq); ++ return retval; ++ } ++#ifndef LIRC_ON_SA1100 ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": I/O port 0x%.4x, IRQ %d.\n", ++ io, irq); ++#endif ++ ++ init_timer(&timerlist); ++ timerlist.function = sir_timeout; ++ timerlist.data = 0xabadcafe; ++ ++ return 0; ++} ++ ++static void drop_port(void) ++{ ++ free_irq(irq, NULL); ++ del_timer_sync(&timerlist); ++#ifndef LIRC_ON_SA1100 ++ release_region(io, 8); ++#endif ++} ++ ++#ifdef LIRC_SIR_ACTISYS_ACT200L ++/* Crystal/Cirrus CS8130 IR transceiver, used in Actisys Act200L dongle */ ++/* some code borrowed from Linux IRDA driver */ ++ ++/* Register 0: Control register #1 */ ++#define ACT200L_REG0 0x00 ++#define ACT200L_TXEN 0x01 /* Enable transmitter */ ++#define ACT200L_RXEN 0x02 /* Enable receiver */ ++#define ACT200L_ECHO 0x08 /* Echo control chars */ ++ ++/* Register 1: Control register #2 */ ++#define ACT200L_REG1 0x10 ++#define ACT200L_LODB 0x01 /* Load new baud rate count value */ ++#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */ ++ ++/* Register 3: Transmit mode register #2 */ ++#define ACT200L_REG3 0x30 ++#define ACT200L_B0 0x01 /* DataBits, 0=6, 1=7, 2=8, 3=9(8P) */ ++#define ACT200L_B1 0x02 /* DataBits, 0=6, 1=7, 2=8, 3=9(8P) */ ++#define ACT200L_CHSY 0x04 /* StartBit Synced 0=bittime, 1=startbit */ ++ ++/* Register 4: Output Power register */ ++#define ACT200L_REG4 0x40 ++#define ACT200L_OP0 0x01 /* Enable LED1C output */ ++#define ACT200L_OP1 0x02 /* Enable LED2C output */ ++#define ACT200L_BLKR 0x04 ++ ++/* Register 5: Receive Mode register */ ++#define ACT200L_REG5 0x50 ++#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */ ++ /*.. 
other various IRDA bit modes, and TV remote modes..*/ ++ ++/* Register 6: Receive Sensitivity register #1 */ ++#define ACT200L_REG6 0x60 ++#define ACT200L_RS0 0x01 /* receive threshold bit 0 */ ++#define ACT200L_RS1 0x02 /* receive threshold bit 1 */ ++ ++/* Register 7: Receive Sensitivity register #2 */ ++#define ACT200L_REG7 0x70 ++#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */ ++ ++/* Register 8,9: Baud Rate Divider register #1,#2 */ ++#define ACT200L_REG8 0x80 ++#define ACT200L_REG9 0x90 ++ ++#define ACT200L_2400 0x5f ++#define ACT200L_9600 0x17 ++#define ACT200L_19200 0x0b ++#define ACT200L_38400 0x05 ++#define ACT200L_57600 0x03 ++#define ACT200L_115200 0x01 ++ ++/* Register 13: Control register #3 */ ++#define ACT200L_REG13 0xd0 ++#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */ ++ ++/* Register 15: Status register */ ++#define ACT200L_REG15 0xf0 ++ ++/* Register 21: Control register #4 */ ++#define ACT200L_REG21 0x50 ++#define ACT200L_EXCK 0x02 /* Disable clock output driver */ ++#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */ ++ ++static void init_act200(void) ++{ ++ int i; ++ __u8 control[] = { ++ ACT200L_REG15, ++ ACT200L_REG13 | ACT200L_SHDW, ++ ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL, ++ ACT200L_REG13, ++ ACT200L_REG7 | ACT200L_ENPOS, ++ ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1, ++ ACT200L_REG5 | ACT200L_RWIDL, ++ ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR, ++ ACT200L_REG3 | ACT200L_B0, ++ ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN, ++ ACT200L_REG8 | (ACT200L_115200 & 0x0f), ++ ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f), ++ ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE ++ }; ++ ++ /* Set DLAB 1. */ ++ soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN8); ++ ++ /* Set divisor to 12 => 9600 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 12); ++ ++ /* Set DLAB 0. 
*/ ++ soutp(UART_LCR, UART_LCR_WLEN8); ++ /* Set divisor to 12 => 9600 Baud */ ++ ++ /* power supply */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ for (i = 0; i < 50; i++) ++ safe_udelay(1000); ++ ++ /* Reset the dongle : set RTS low for 25 ms */ ++ soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2); ++ for (i = 0; i < 25; i++) ++ udelay(1000); ++ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(100); ++ ++ /* Clear DTR and set RTS to enter command mode */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2); ++ udelay(7); ++ ++ /* send out the control register settings for 115K 7N1 SIR operation */ ++ for (i = 0; i < sizeof(control); i++) { ++ soutp(UART_TX, control[i]); ++ /* one byte takes ~1042 usec to transmit at 9600,8N1 */ ++ udelay(1500); ++ } ++ ++ /* back to normal operation */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(50); ++ ++ udelay(1500); ++ soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB); ++ ++ /* Set DLAB 1. */ ++ soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7); ++ ++ /* Set divisor to 1 => 115200 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 1); ++ ++ /* Set DLAB 0. 
*/ ++ soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB)); ++ ++ /* Set DLAB 0, 7 Bit */ ++ soutp(UART_LCR, UART_LCR_WLEN7); ++ ++ /* enable interrupts */ ++ soutp(UART_IER, sinp(UART_IER)|UART_IER_RDI); ++} ++#endif ++ ++#ifdef LIRC_SIR_ACTISYS_ACT220L ++/* ++ * Derived from linux IrDA driver (net/irda/actisys.c) ++ * Drop me a mail for any kind of comment: maxx@spaceboyz.net ++ */ ++ ++void init_act220(void) ++{ ++ int i; ++ ++ /* DLAB 1 */ ++ soutp(UART_LCR, UART_LCR_DLAB|UART_LCR_WLEN7); ++ ++ /* 9600 baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 12); ++ ++ /* DLAB 0 */ ++ soutp(UART_LCR, UART_LCR_WLEN7); ++ ++ /* reset the dongle, set DTR low for 10us */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2); ++ udelay(10); ++ ++ /* back to normal (still 9600) */ ++ soutp(UART_MCR, UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2); ++ ++ /* ++ * send RTS pulses until we reach 115200 ++ * i hope this is really the same for act220l/act220l+ ++ */ ++ for (i = 0; i < 3; i++) { ++ udelay(10); ++ /* set RTS low for 10 us */ ++ soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2); ++ udelay(10); ++ /* set RTS high for 10 us */ ++ soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2); ++ } ++ ++ /* back to normal operation */ ++ udelay(1500); /* better safe than sorry ;) */ ++ ++ /* Set DLAB 1. 
*/ ++ soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7); ++ ++ /* Set divisor to 1 => 115200 Baud */ ++ soutp(UART_DLM, 0); ++ soutp(UART_DLL, 1); ++ ++ /* Set DLAB 0, 7 Bit */ ++ /* The dongle doesn't seem to have any problems with operation at 7N1 */ ++ soutp(UART_LCR, UART_LCR_WLEN7); ++ ++ /* enable interrupts */ ++ soutp(UART_IER, UART_IER_RDI); ++} ++#endif ++ ++static int init_lirc_sir(void) ++{ ++ int retval; ++ ++ init_waitqueue_head(&lirc_read_queue); ++ retval = init_port(); ++ if (retval < 0) ++ return retval; ++ init_hardware(); ++ printk(KERN_INFO LIRC_DRIVER_NAME ++ ": Installed.\n"); ++ return 0; ++} ++ ++ ++static int __init lirc_sir_init(void) ++{ ++ int retval; ++ ++ retval = init_chrdev(); ++ if (retval < 0) ++ return retval; ++ retval = init_lirc_sir(); ++ if (retval) { ++ drop_chrdev(); ++ return retval; ++ } ++ return 0; ++} ++ ++static void __exit lirc_sir_exit(void) ++{ ++ drop_hardware(); ++ drop_chrdev(); ++ drop_port(); ++ printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n"); ++} ++ ++module_init(lirc_sir_init); ++module_exit(lirc_sir_exit); ++ ++#ifdef LIRC_SIR_TEKRAM ++MODULE_DESCRIPTION("Infrared receiver driver for Tekram Irmate 210"); ++MODULE_AUTHOR("Christoph Bartelmus"); ++#elif defined(LIRC_ON_SA1100) ++MODULE_DESCRIPTION("LIRC driver for StrongARM SA1100 embedded microprocessor"); ++MODULE_AUTHOR("Christoph Bartelmus"); ++#elif defined(LIRC_SIR_ACTISYS_ACT200L) ++MODULE_DESCRIPTION("LIRC driver for Actisys Act200L"); ++MODULE_AUTHOR("Karl Bongers"); ++#elif defined(LIRC_SIR_ACTISYS_ACT220L) ++MODULE_DESCRIPTION("LIRC driver for Actisys Act220L(+)"); ++MODULE_AUTHOR("Jan Roemisch"); ++#else ++MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports"); ++MODULE_AUTHOR("Milan Pikula"); ++#endif ++MODULE_LICENSE("GPL"); ++ ++#ifdef LIRC_ON_SA1100 ++module_param(irq, int, S_IRUGO); ++MODULE_PARM_DESC(irq, "Interrupt (16)"); ++#else ++module_param(io, int, S_IRUGO); ++MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 
0x2f8)"); ++ ++module_param(irq, int, S_IRUGO); ++MODULE_PARM_DESC(irq, "Interrupt (4 or 3)"); ++ ++module_param(threshold, int, S_IRUGO); ++MODULE_PARM_DESC(threshold, "space detection threshold (3)"); ++#endif ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); +diff --git a/drivers/input/lirc/lirc_streamzap.c b/drivers/input/lirc/lirc_streamzap.c +new file mode 100644 +index 0000000..f4374e8 +--- /dev/null ++++ b/drivers/input/lirc/lirc_streamzap.c +@@ -0,0 +1,794 @@ ++/* ++ * Streamzap Remote Control driver ++ * ++ * Copyright (c) 2005 Christoph Bartelmus ++ * ++ * This driver was based on the work of Greg Wickham and Adrian ++ * Dewhurst. It was substantially rewritten to support correct signal ++ * gaps and now maintains a delay buffer, which is used to present ++ * consistent timing behaviour to user space applications. Without the ++ * delay buffer an ugly hack would be required in lircd, which can ++ * cause sluggish signal decoding in certain situations. ++ * ++ * This driver is based on the USB skeleton driver packaged with the ++ * kernel; copyright (C) 2001-2003 Greg Kroah-Hartman (greg@kroah.com) ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++#define DRIVER_VERSION "1.28" ++#define DRIVER_NAME "lirc_streamzap" ++#define DRIVER_DESC "Streamzap Remote Control driver" ++ ++static int debug; ++ ++#define USB_STREAMZAP_VENDOR_ID 0x0e9c ++#define USB_STREAMZAP_PRODUCT_ID 0x0000 ++ ++/* Use our own dbg macro */ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG DRIVER_NAME "[%d]: " \ ++ fmt "\n", ## args); \ ++ } while (0) ++ ++/* table of devices that work with this driver */ ++static struct usb_device_id streamzap_table[] = { ++ /* Streamzap Remote Control */ ++ { USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) }, ++ /* Terminating entry */ ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(usb, streamzap_table); ++ ++#define STREAMZAP_PULSE_MASK 0xf0 ++#define STREAMZAP_SPACE_MASK 0x0f ++#define STREAMZAP_RESOLUTION 256 ++ ++/* number of samples buffered */ ++#define STREAMZAP_BUF_LEN 128 ++ ++enum StreamzapDecoderState { ++ PulseSpace, ++ FullPulse, ++ FullSpace, ++ IgnorePulse ++}; ++ ++/* Structure to hold all of our device specific stuff ++ * ++ * some remarks regarding locking: ++ * theoretically this struct can be accessed from three threads: ++ * ++ * - from lirc_dev through set_use_inc/set_use_dec ++ * ++ * - from the USB layer throuh probe/disconnect/irq ++ * ++ * Careful placement of lirc_register_driver/lirc_unregister_driver ++ * calls will prevent conflicts. lirc_dev makes sure that ++ * set_use_inc/set_use_dec are not being executed and will not be ++ * called after lirc_unregister_driver returns. 
++ * ++ * - by the timer callback ++ * ++ * The timer is only running when the device is connected and the ++ * LIRC device is open. Making sure the timer is deleted by ++ * set_use_dec will make conflicts impossible. ++ */ ++struct usb_streamzap { ++ ++ /* usb */ ++ /* save off the usb device pointer */ ++ struct usb_device *udev; ++ /* the interface for this device */ ++ struct usb_interface *interface; ++ ++ /* buffer & dma */ ++ unsigned char *buf_in; ++ dma_addr_t dma_in; ++ unsigned int buf_in_len; ++ ++ struct usb_endpoint_descriptor *endpoint; ++ ++ /* IRQ */ ++ struct urb *urb_in; ++ ++ /* lirc */ ++ struct lirc_driver *driver; ++ struct lirc_buffer *delay_buf; ++ ++ /* timer used to support delay buffering */ ++ struct timer_list delay_timer; ++ int timer_running; ++ spinlock_t timer_lock; ++ ++ /* tracks whether we are currently receiving some signal */ ++ int idle; ++ /* sum of signal lengths received since signal start */ ++ unsigned long sum; ++ /* start time of signal; necessary for gap tracking */ ++ struct timeval signal_last; ++ struct timeval signal_start; ++ enum StreamzapDecoderState decoder_state; ++ struct timer_list flush_timer; ++ int flush; ++ int in_use; ++}; ++ ++ ++/* local function prototypes */ ++static int streamzap_probe(struct usb_interface *interface, ++ const struct usb_device_id *id); ++static void streamzap_disconnect(struct usb_interface *interface); ++static void usb_streamzap_irq(struct urb *urb); ++static int streamzap_use_inc(void *data); ++static void streamzap_use_dec(void *data); ++static int streamzap_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg); ++static int streamzap_suspend(struct usb_interface *intf, pm_message_t message); ++static int streamzap_resume(struct usb_interface *intf); ++ ++/* usb specific object needed to register this driver with the usb subsystem */ ++ ++static struct usb_driver streamzap_driver = { ++ .name = DRIVER_NAME, ++ .probe = streamzap_probe, ++ 
.disconnect = streamzap_disconnect, ++ .suspend = streamzap_suspend, ++ .resume = streamzap_resume, ++ .id_table = streamzap_table, ++}; ++ ++static void stop_timer(struct usb_streamzap *sz) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&sz->timer_lock, flags); ++ if (sz->timer_running) { ++ sz->timer_running = 0; ++ spin_unlock_irqrestore(&sz->timer_lock, flags); ++ del_timer_sync(&sz->delay_timer); ++ } else { ++ spin_unlock_irqrestore(&sz->timer_lock, flags); ++ } ++} ++ ++static void flush_timeout(unsigned long arg) ++{ ++ struct usb_streamzap *sz = (struct usb_streamzap *) arg; ++ ++ /* finally start accepting data */ ++ sz->flush = 0; ++} ++static void delay_timeout(unsigned long arg) ++{ ++ unsigned long flags; ++ /* deliver data every 10 ms */ ++ static unsigned long timer_inc = ++ (10000/(1000000/HZ)) == 0 ? 1 : (10000/(1000000/HZ)); ++ struct usb_streamzap *sz = (struct usb_streamzap *) arg; ++ int data; ++ ++ spin_lock_irqsave(&sz->timer_lock, flags); ++ ++ if (!lirc_buffer_empty(sz->delay_buf) && ++ !lirc_buffer_full(sz->driver->rbuf)) { ++ lirc_buffer_read(sz->delay_buf, (unsigned char *) &data); ++ lirc_buffer_write(sz->driver->rbuf, (unsigned char *) &data); ++ } ++ if (!lirc_buffer_empty(sz->delay_buf)) { ++ while (lirc_buffer_available(sz->delay_buf) < ++ STREAMZAP_BUF_LEN / 2 && ++ !lirc_buffer_full(sz->driver->rbuf)) { ++ lirc_buffer_read(sz->delay_buf, ++ (unsigned char *) &data); ++ lirc_buffer_write(sz->driver->rbuf, ++ (unsigned char *) &data); ++ } ++ if (sz->timer_running) { ++ sz->delay_timer.expires = jiffies + timer_inc; ++ add_timer(&sz->delay_timer); ++ } ++ } else { ++ sz->timer_running = 0; ++ } ++ ++ if (!lirc_buffer_empty(sz->driver->rbuf)) ++ wake_up(&sz->driver->rbuf->wait_poll); ++ ++ spin_unlock_irqrestore(&sz->timer_lock, flags); ++} ++ ++static void flush_delay_buffer(struct usb_streamzap *sz) ++{ ++ int data; ++ int empty = 1; ++ ++ while (!lirc_buffer_empty(sz->delay_buf)) { ++ empty = 0; ++ 
lirc_buffer_read(sz->delay_buf, (unsigned char *) &data); ++ if (!lirc_buffer_full(sz->driver->rbuf)) { ++ lirc_buffer_write(sz->driver->rbuf, ++ (unsigned char *) &data); ++ } else { ++ dprintk("buffer overflow", sz->driver->minor); ++ } ++ } ++ if (!empty) ++ wake_up(&sz->driver->rbuf->wait_poll); ++} ++ ++static void push(struct usb_streamzap *sz, unsigned char *data) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&sz->timer_lock, flags); ++ if (lirc_buffer_full(sz->delay_buf)) { ++ int read_data; ++ ++ lirc_buffer_read(sz->delay_buf, ++ (unsigned char *) &read_data); ++ if (!lirc_buffer_full(sz->driver->rbuf)) { ++ lirc_buffer_write(sz->driver->rbuf, ++ (unsigned char *) &read_data); ++ } else { ++ dprintk("buffer overflow", sz->driver->minor); ++ } ++ } ++ ++ lirc_buffer_write(sz->delay_buf, data); ++ ++ if (!sz->timer_running) { ++ sz->delay_timer.expires = jiffies + HZ/10; ++ add_timer(&sz->delay_timer); ++ sz->timer_running = 1; ++ } ++ ++ spin_unlock_irqrestore(&sz->timer_lock, flags); ++} ++ ++static void push_full_pulse(struct usb_streamzap *sz, ++ unsigned char value) ++{ ++ int pulse; ++ ++ if (sz->idle) { ++ long deltv; ++ int tmp; ++ ++ sz->signal_last = sz->signal_start; ++ do_gettimeofday(&sz->signal_start); ++ ++ deltv = sz->signal_start.tv_sec-sz->signal_last.tv_sec; ++ if (deltv > 15) { ++ tmp = PULSE_MASK; /* really long time */ ++ } else { ++ tmp = (int) (deltv*1000000+ ++ sz->signal_start.tv_usec - ++ sz->signal_last.tv_usec); ++ tmp -= sz->sum; ++ } ++ dprintk("ls %u", sz->driver->minor, tmp); ++ push(sz, (char *)&tmp); ++ ++ sz->idle = 0; ++ sz->sum = 0; ++ } ++ ++ pulse = ((int) value) * STREAMZAP_RESOLUTION; ++ pulse += STREAMZAP_RESOLUTION / 2; ++ sz->sum += pulse; ++ pulse |= PULSE_BIT; ++ ++ dprintk("p %u", sz->driver->minor, pulse & PULSE_MASK); ++ push(sz, (char *)&pulse); ++} ++ ++static void push_half_pulse(struct usb_streamzap *sz, ++ unsigned char value) ++{ ++ push_full_pulse(sz, (value & STREAMZAP_PULSE_MASK)>>4); ++} ++ 
++static void push_full_space(struct usb_streamzap *sz, ++ unsigned char value) ++{ ++ int space; ++ ++ space = ((int) value)*STREAMZAP_RESOLUTION; ++ space += STREAMZAP_RESOLUTION/2; ++ sz->sum += space; ++ dprintk("s %u", sz->driver->minor, space); ++ push(sz, (char *)&space); ++} ++ ++static void push_half_space(struct usb_streamzap *sz, ++ unsigned char value) ++{ ++ push_full_space(sz, value & STREAMZAP_SPACE_MASK); ++} ++ ++/** ++ * usb_streamzap_irq - IRQ handler ++ * ++ * This procedure is invoked on reception of data from ++ * the usb remote. ++ */ ++static void usb_streamzap_irq(struct urb *urb) ++{ ++ struct usb_streamzap *sz; ++ int len; ++ unsigned int i = 0; ++ ++ if (!urb) ++ return; ++ ++ sz = urb->context; ++ len = urb->actual_length; ++ ++ switch (urb->status) { ++ case -ECONNRESET: ++ case -ENOENT: ++ case -ESHUTDOWN: ++ /* ++ * this urb is terminated, clean up. ++ * sz might already be invalid at this point ++ */ ++ dprintk("urb status: %d", -1, urb->status); ++ return; ++ default: ++ break; ++ } ++ ++ dprintk("received %d", sz->driver->minor, urb->actual_length); ++ if (!sz->flush) { ++ for (i = 0; i < urb->actual_length; i++) { ++ dprintk("%d: %x", sz->driver->minor, ++ i, (unsigned char) sz->buf_in[i]); ++ switch (sz->decoder_state) { ++ case PulseSpace: ++ if ((sz->buf_in[i]&STREAMZAP_PULSE_MASK) == ++ STREAMZAP_PULSE_MASK) { ++ sz->decoder_state = FullPulse; ++ continue; ++ } else if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK) ++ == STREAMZAP_SPACE_MASK) { ++ push_half_pulse(sz, sz->buf_in[i]); ++ sz->decoder_state = FullSpace; ++ continue; ++ } else { ++ push_half_pulse(sz, sz->buf_in[i]); ++ push_half_space(sz, sz->buf_in[i]); ++ } ++ break; ++ case FullPulse: ++ push_full_pulse(sz, sz->buf_in[i]); ++ sz->decoder_state = IgnorePulse; ++ break; ++ case FullSpace: ++ if (sz->buf_in[i] == 0xff) { ++ sz->idle = 1; ++ stop_timer(sz); ++ flush_delay_buffer(sz); ++ } else ++ push_full_space(sz, sz->buf_in[i]); ++ sz->decoder_state = PulseSpace; ++ 
break; ++ case IgnorePulse: ++ if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK) == ++ STREAMZAP_SPACE_MASK) { ++ sz->decoder_state = FullSpace; ++ continue; ++ } ++ push_half_space(sz, sz->buf_in[i]); ++ sz->decoder_state = PulseSpace; ++ break; ++ } ++ } ++ } ++ ++ usb_submit_urb(urb, GFP_ATOMIC); ++ ++ return; ++} ++ ++static struct file_operations streamzap_fops = { ++ .owner = THIS_MODULE, ++ .ioctl = streamzap_ioctl, ++ .read = lirc_dev_fop_read, ++ .write = lirc_dev_fop_write, ++ .poll = lirc_dev_fop_poll, ++ .open = lirc_dev_fop_open, ++ .release = lirc_dev_fop_close, ++}; ++ ++ ++/** ++ * streamzap_probe ++ * ++ * Called by usb-core to associated with a candidate device ++ * On any failure the return value is the ERROR ++ * On success return 0 ++ */ ++static int streamzap_probe(struct usb_interface *interface, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *udev = interface_to_usbdev(interface); ++ struct usb_host_interface *iface_host; ++ struct usb_streamzap *sz; ++ struct lirc_driver *driver; ++ struct lirc_buffer *lirc_buf; ++ struct lirc_buffer *delay_buf; ++ char buf[63], name[128] = ""; ++ int retval = -ENOMEM; ++ int minor = 0; ++ ++ /* Allocate space for device driver specific data */ ++ sz = kzalloc(sizeof(struct usb_streamzap), GFP_KERNEL); ++ if (sz == NULL) ++ return -ENOMEM; ++ ++ sz->udev = udev; ++ sz->interface = interface; ++ ++ /* Check to ensure endpoint information matches requirements */ ++ iface_host = interface->cur_altsetting; ++ ++ if (iface_host->desc.bNumEndpoints != 1) { ++ err("%s: Unexpected desc.bNumEndpoints (%d)", __func__, ++ iface_host->desc.bNumEndpoints); ++ retval = -ENODEV; ++ goto free_sz; ++ } ++ ++ sz->endpoint = &(iface_host->endpoint[0].desc); ++ if ((sz->endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ++ != USB_DIR_IN) { ++ err("%s: endpoint doesn't match input device 02%02x", ++ __func__, sz->endpoint->bEndpointAddress); ++ retval = -ENODEV; ++ goto free_sz; ++ } ++ ++ if ((sz->endpoint->bmAttributes & 
USB_ENDPOINT_XFERTYPE_MASK) ++ != USB_ENDPOINT_XFER_INT) { ++ err("%s: endpoint attributes don't match xfer 02%02x", ++ __func__, sz->endpoint->bmAttributes); ++ retval = -ENODEV; ++ goto free_sz; ++ } ++ ++ if (sz->endpoint->wMaxPacketSize == 0) { ++ err("%s: endpoint message size==0? ", __func__); ++ retval = -ENODEV; ++ goto free_sz; ++ } ++ ++ /* Allocate the USB buffer and IRQ URB */ ++ ++ sz->buf_in_len = sz->endpoint->wMaxPacketSize; ++ sz->buf_in = usb_buffer_alloc(sz->udev, sz->buf_in_len, ++ GFP_ATOMIC, &sz->dma_in); ++ if (sz->buf_in == NULL) ++ goto free_sz; ++ ++ sz->urb_in = usb_alloc_urb(0, GFP_KERNEL); ++ if (sz->urb_in == NULL) ++ goto free_sz; ++ ++ /* Connect this device to the LIRC sub-system */ ++ driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); ++ if (!driver) ++ goto free_sz; ++ ++ lirc_buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!lirc_buf) ++ goto free_driver; ++ if (lirc_buffer_init(lirc_buf, sizeof(int), STREAMZAP_BUF_LEN)) ++ goto kfree_lirc_buf; ++ ++ delay_buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); ++ if (!delay_buf) ++ goto free_lirc_buf; ++ if (lirc_buffer_init(delay_buf, sizeof(int), STREAMZAP_BUF_LEN)) ++ goto kfree_delay_buf; ++ ++ sz->driver = driver; ++ strcpy(sz->driver->name, DRIVER_NAME); ++ sz->driver->minor = -1; ++ sz->driver->sample_rate = 0; ++ sz->driver->code_length = sizeof(int) * 8; ++ sz->driver->features = LIRC_CAN_REC_MODE2 | LIRC_CAN_GET_REC_RESOLUTION; ++ sz->driver->data = sz; ++ sz->driver->rbuf = lirc_buf; ++ sz->delay_buf = delay_buf; ++ sz->driver->set_use_inc = &streamzap_use_inc; ++ sz->driver->set_use_dec = &streamzap_use_dec; ++ sz->driver->fops = &streamzap_fops; ++ sz->driver->dev = &interface->dev; ++ sz->driver->owner = THIS_MODULE; ++ ++ sz->idle = 1; ++ sz->decoder_state = PulseSpace; ++ init_timer(&sz->delay_timer); ++ sz->delay_timer.function = delay_timeout; ++ sz->delay_timer.data = (unsigned long) sz; ++ sz->timer_running = 0; ++ 
spin_lock_init(&sz->timer_lock); ++ ++ init_timer(&sz->flush_timer); ++ sz->flush_timer.function = flush_timeout; ++ sz->flush_timer.data = (unsigned long) sz; ++ /* Complete final initialisations */ ++ ++ usb_fill_int_urb(sz->urb_in, udev, ++ usb_rcvintpipe(udev, sz->endpoint->bEndpointAddress), ++ sz->buf_in, sz->buf_in_len, usb_streamzap_irq, sz, ++ sz->endpoint->bInterval); ++ sz->urb_in->transfer_dma = sz->dma_in; ++ sz->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ ++ if (udev->descriptor.iManufacturer ++ && usb_string(udev, udev->descriptor.iManufacturer, ++ buf, sizeof(buf)) > 0) ++ strlcpy(name, buf, sizeof(name)); ++ ++ if (udev->descriptor.iProduct ++ && usb_string(udev, udev->descriptor.iProduct, ++ buf, sizeof(buf)) > 0) ++ snprintf(name + strlen(name), sizeof(name) - strlen(name), ++ " %s", buf); ++ ++ minor = lirc_register_driver(driver); ++ ++ if (minor < 0) ++ goto free_delay_buf; ++ ++ sz->driver->minor = minor; ++ ++ usb_set_intfdata(interface, sz); ++ ++ printk(KERN_INFO DRIVER_NAME "[%d]: %s on usb%d:%d attached\n", ++ sz->driver->minor, name, ++ udev->bus->busnum, sz->udev->devnum); ++ ++ return 0; ++ ++free_delay_buf: ++ lirc_buffer_free(sz->delay_buf); ++kfree_delay_buf: ++ kfree(delay_buf); ++free_lirc_buf: ++ lirc_buffer_free(sz->driver->rbuf); ++kfree_lirc_buf: ++ kfree(lirc_buf); ++free_driver: ++ kfree(driver); ++free_sz: ++ if (retval == -ENOMEM) ++ err("Out of memory"); ++ ++ if (sz) { ++ usb_free_urb(sz->urb_in); ++ usb_buffer_free(udev, sz->buf_in_len, sz->buf_in, sz->dma_in); ++ kfree(sz); ++ } ++ ++ return retval; ++} ++ ++static int streamzap_use_inc(void *data) ++{ ++ struct usb_streamzap *sz = data; ++ ++ if (!sz) { ++ dprintk("%s called with no context", -1, __func__); ++ return -EINVAL; ++ } ++ dprintk("set use inc", sz->driver->minor); ++ ++ lirc_buffer_clear(sz->driver->rbuf); ++ lirc_buffer_clear(sz->delay_buf); ++ ++ sz->flush_timer.expires = jiffies + HZ; ++ sz->flush = 1; ++ add_timer(&sz->flush_timer); ++ ++ 
sz->urb_in->dev = sz->udev; ++ if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) { ++ dprintk("open result = -EIO error submitting urb", ++ sz->driver->minor); ++ return -EIO; ++ } ++ sz->in_use++; ++ ++ return 0; ++} ++ ++static void streamzap_use_dec(void *data) ++{ ++ struct usb_streamzap *sz = data; ++ ++ if (!sz) { ++ dprintk("%s called with no context", -1, __func__); ++ return; ++ } ++ dprintk("set use dec", sz->driver->minor); ++ ++ if (sz->flush) { ++ sz->flush = 0; ++ del_timer_sync(&sz->flush_timer); ++ } ++ ++ usb_kill_urb(sz->urb_in); ++ ++ stop_timer(sz); ++ ++ sz->in_use--; ++} ++ ++static int streamzap_ioctl(struct inode *node, struct file *filep, ++ unsigned int cmd, unsigned long arg) ++{ ++ int result; ++ ++ switch (cmd) { ++ case LIRC_GET_REC_RESOLUTION: ++ result = put_user(STREAMZAP_RESOLUTION, (unsigned int *) arg); ++ if (result) ++ return result; ++ break; ++ default: ++ return lirc_dev_fop_ioctl(node, filep, cmd, arg); ++ } ++ return 0; ++} ++ ++/** ++ * streamzap_disconnect ++ * ++ * Called by the usb core when the device is removed from the system. ++ * ++ * This routine guarantees that the driver will not submit any more urbs ++ * by clearing dev->udev. It is also supposed to terminate any currently ++ * active urbs. Unfortunately, usb_bulk_msg(), used in streamzap_read(), ++ * does not provide any way to do this. 
++ */ ++static void streamzap_disconnect(struct usb_interface *interface) ++{ ++ struct usb_streamzap *sz; ++ int errnum; ++ int minor; ++ ++ sz = usb_get_intfdata(interface); ++ ++ /* unregister from the LIRC sub-system */ ++ ++ errnum = lirc_unregister_driver(sz->driver->minor); ++ if (errnum != 0) ++ dprintk("error in lirc_unregister: (returned %d)", ++ sz->driver->minor, errnum); ++ ++ lirc_buffer_free(sz->delay_buf); ++ lirc_buffer_free(sz->driver->rbuf); ++ ++ /* unregister from the USB sub-system */ ++ ++ usb_free_urb(sz->urb_in); ++ ++ usb_buffer_free(sz->udev, sz->buf_in_len, sz->buf_in, sz->dma_in); ++ ++ minor = sz->driver->minor; ++ kfree(sz->driver->rbuf); ++ kfree(sz->driver); ++ kfree(sz->delay_buf); ++ kfree(sz); ++ ++ printk(KERN_INFO DRIVER_NAME "[%d]: disconnected\n", minor); ++} ++ ++static int streamzap_suspend(struct usb_interface *intf, pm_message_t message) ++{ ++ struct usb_streamzap *sz = usb_get_intfdata(intf); ++ ++ printk(KERN_INFO DRIVER_NAME "[%d]: suspend\n", sz->driver->minor); ++ if (sz->in_use) { ++ if (sz->flush) { ++ sz->flush = 0; ++ del_timer_sync(&sz->flush_timer); ++ } ++ ++ stop_timer(sz); ++ ++ usb_kill_urb(sz->urb_in); ++ } ++ return 0; ++} ++ ++static int streamzap_resume(struct usb_interface *intf) ++{ ++ struct usb_streamzap *sz = usb_get_intfdata(intf); ++ ++ lirc_buffer_clear(sz->driver->rbuf); ++ lirc_buffer_clear(sz->delay_buf); ++ ++ if (sz->in_use) { ++ sz->flush_timer.expires = jiffies + HZ; ++ sz->flush = 1; ++ add_timer(&sz->flush_timer); ++ ++ sz->urb_in->dev = sz->udev; ++ if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) { ++ dprintk("open result = -EIO error submitting urb", ++ sz->driver->minor); ++ return -EIO; ++ } ++ } ++ return 0; ++} ++ ++/** ++ * usb_streamzap_init ++ */ ++static int __init usb_streamzap_init(void) ++{ ++ int result; ++ ++ /* register this driver with the USB subsystem */ ++ result = usb_register(&streamzap_driver); ++ ++ if (result) { ++ err("usb_register failed. 
Error number %d", ++ result); ++ return result; ++ } ++ ++ printk(KERN_INFO DRIVER_NAME " " DRIVER_VERSION " registered\n"); ++ return 0; ++} ++ ++/** ++ * usb_streamzap_exit ++ */ ++static void __exit usb_streamzap_exit(void) ++{ ++ usb_deregister(&streamzap_driver); ++} ++ ++ ++module_init(usb_streamzap_init); ++module_exit(usb_streamzap_exit); ++ ++MODULE_AUTHOR("Christoph Bartelmus, Greg Wickham, Adrian Dewhurst"); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL"); ++ ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); +diff --git a/drivers/input/lirc/lirc_ttusbir.c b/drivers/input/lirc/lirc_ttusbir.c +new file mode 100644 +index 0000000..b0a4e8b +--- /dev/null ++++ b/drivers/input/lirc/lirc_ttusbir.c +@@ -0,0 +1,397 @@ ++/* ++ * lirc_ttusbir.c ++ * ++ * lirc_ttusbir - LIRC device driver for the TechnoTrend USB IR Receiver ++ * ++ * Copyright (C) 2007 Stefan Macher ++ * ++ * This LIRC driver provides access to the TechnoTrend USB IR Receiver. ++ * The receiver delivers the IR signal as raw sampled true/false data in ++ * isochronous USB packets each of size 128 byte. ++ * Currently the driver reduces the sampling rate by factor of 8 as this ++ * is still more than enough to decode RC-5 - others should be analyzed. ++ * But the driver does not rely on RC-5 it should be able to decode every ++ * IR signal that is not too fast. ++ */ ++ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "lirc_dev.h" ++ ++MODULE_DESCRIPTION("TechnoTrend USB IR device driver for LIRC"); ++MODULE_AUTHOR("Stefan Macher (st_maker-lirc@yahoo.de)"); ++MODULE_LICENSE("GPL"); ++ ++/* #define DEBUG */ ++#ifdef DEBUG ++#define DPRINTK printk ++#else ++#define DPRINTK(_x_, a...) ++#endif ++ ++/* function declarations */ ++static int probe(struct usb_interface *intf, const struct usb_device_id *id); ++static void disconnect(struct usb_interface *intf); ++static void urb_complete(struct urb *urb); ++static int set_use_inc(void *data); ++static void set_use_dec(void *data); ++ ++static int num_urbs = 2; ++module_param(num_urbs, int, S_IRUGO); ++MODULE_PARM_DESC(num_urbs, ++ "Number of URBs in queue. 
Try to increase to 4 in case " ++ "of problems (default: 2; minimum: 2)"); ++ ++/* table of devices that work with this driver */ ++static struct usb_device_id device_id_table[] = { ++ /* TechnoTrend USB IR Receiver */ ++ { USB_DEVICE(0x0B48, 0x2003) }, ++ /* Terminating entry */ ++ { } ++}; ++MODULE_DEVICE_TABLE(usb, device_id_table); ++ ++/* USB driver definition */ ++static struct usb_driver usb_driver = { ++ .name = "TTUSBIR", ++ .id_table = &(device_id_table[0]), ++ .probe = probe, ++ .disconnect = disconnect, ++}; ++ ++/* USB device definition */ ++struct ttusbir_device { ++ struct usb_driver *usb_driver; ++ struct usb_device *udev; ++ struct usb_interface *interf; ++ struct usb_class_driver class_driver; ++ unsigned int ifnum; /* Interface number to use */ ++ unsigned int alt_setting; /* alternate setting to use */ ++ unsigned int endpoint; /* Endpoint to use */ ++ struct urb **urb; /* num_urb URB pointers*/ ++ char **buffer; /* 128 byte buffer for each URB */ ++ struct lirc_buffer rbuf; /* Buffer towards LIRC */ ++ struct lirc_driver driver; ++ int minor; ++ int last_pulse; /* remembers if last received byte was pulse or space */ ++ int last_num; /* remembers how many last bytes appeared */ ++ int opened; ++}; ++ ++/*** LIRC specific functions ***/ ++static int set_use_inc(void *data) ++{ ++ int i, retval; ++ struct ttusbir_device *ttusbir = data; ++ ++ DPRINTK("Sending first URBs\n"); ++ /* @TODO Do I need to check if I am already opened */ ++ ttusbir->opened = 1; ++ ++ for (i = 0; i < num_urbs; i++) { ++ retval = usb_submit_urb(ttusbir->urb[i], GFP_KERNEL); ++ if (retval) { ++ err("%s: usb_submit_urb failed on urb %d", ++ __func__, i); ++ return retval; ++ } ++ } ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct ttusbir_device *ttusbir = data; ++ ++ DPRINTK("Device closed\n"); ++ ++ ttusbir->opened = 0; ++} ++ ++/*** USB specific functions ***/ ++ ++/* ++ * This mapping table is used to do a very simple filtering of the ++ * input 
signal. ++ * For a value with at least 4 bits set it returns 0xFF otherwise ++ * 0x00. For faster IR signals this can not be used. But for RC-5 we ++ * still have about 14 samples per pulse/space, i.e. we sample with 14 ++ * times higher frequency than the signal frequency ++ */ ++const unsigned char map_table[] = ++{ ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0x00, 0x00, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF ++}; ++ ++static void urb_complete(struct urb 
*urb) ++{ ++ struct ttusbir_device *ttusbir; ++ unsigned char *buf; ++ int i; ++ int l; ++ ++ ttusbir = urb->context; ++ ++ if (!ttusbir->opened) ++ return; ++ ++ buf = (unsigned char *)urb->transfer_buffer; ++ ++ for (i = 0; i < 128; i++) { ++ /* Here we do the filtering and some kind of down sampling */ ++ buf[i] = ~map_table[buf[i]]; ++ if (ttusbir->last_pulse == buf[i]) { ++ if (ttusbir->last_num < PULSE_MASK/63) ++ ttusbir->last_num++; ++ /* ++ * else we are in a idle period and do not need to ++ * increment any longer ++ */ ++ } else { ++ l = ttusbir->last_num * 62; /* about 62 = us/byte */ ++ if (ttusbir->last_pulse) /* pulse or space? */ ++ l |= PULSE_BIT; ++ if (!lirc_buffer_full(&ttusbir->rbuf)) { ++ lirc_buffer_write(&ttusbir->rbuf, (void *)&l); ++ wake_up_interruptible(&ttusbir->rbuf.wait_poll); ++ } ++ ttusbir->last_num = 0; ++ ttusbir->last_pulse = buf[i]; ++ } ++ } ++ usb_submit_urb(urb, GFP_ATOMIC); /* keep data rolling :-) */ ++} ++ ++/* ++ * Called whenever the USB subsystem thinks we could be the right driver ++ * to handle this device ++ */ ++static int probe(struct usb_interface *intf, const struct usb_device_id *id) ++{ ++ int alt_set, endp; ++ int found = 0; ++ int i, j; ++ int struct_size; ++ struct usb_host_interface *host_interf; ++ struct usb_interface_descriptor *interf_desc; ++ struct usb_host_endpoint *host_endpoint; ++ struct ttusbir_device *ttusbir; ++ ++ DPRINTK("Module ttusbir probe\n"); ++ ++ /* To reduce memory fragmentation we use only one allocation */ ++ struct_size = sizeof(struct ttusbir_device) + ++ (sizeof(struct urb *) * num_urbs) + ++ (sizeof(char *) * num_urbs) + ++ (num_urbs * 128); ++ ttusbir = kzalloc(struct_size, GFP_KERNEL); ++ if (!ttusbir) ++ return -ENOMEM; ++ ++ ttusbir->urb = (struct urb **)((char *)ttusbir + ++ sizeof(struct ttusbir_device)); ++ ttusbir->buffer = (char **)((char *)ttusbir->urb + ++ (sizeof(struct urb *) * num_urbs)); ++ for (i = 0; i < num_urbs; i++) ++ ttusbir->buffer[i] = (char 
*)ttusbir->buffer + ++ (sizeof(char *)*num_urbs) + (i * 128); ++ ++ ttusbir->usb_driver = &usb_driver; ++ ttusbir->alt_setting = -1; ++ /* @TODO check if error can be returned */ ++ ttusbir->udev = usb_get_dev(interface_to_usbdev(intf)); ++ ttusbir->interf = intf; ++ ttusbir->last_pulse = 0x00; ++ ttusbir->last_num = 0; ++ ++ /* ++ * Now look for interface setting we can handle ++ * We are searching for the alt setting where end point ++ * 0x82 has max packet size 16 ++ */ ++ for (alt_set = 0; alt_set < intf->num_altsetting && !found; alt_set++) { ++ host_interf = &intf->altsetting[alt_set]; ++ interf_desc = &host_interf->desc; ++ for (endp = 0; endp < interf_desc->bNumEndpoints; endp++) { ++ host_endpoint = &host_interf->endpoint[endp]; ++ if ((host_endpoint->desc.bEndpointAddress == 0x82) && ++ (host_endpoint->desc.wMaxPacketSize == 0x10)) { ++ ttusbir->alt_setting = alt_set; ++ ttusbir->endpoint = endp; ++ found = 1; ++ break; ++ } ++ } ++ } ++ if (ttusbir->alt_setting != -1) ++ DPRINTK("alt setting: %d\n", ttusbir->alt_setting); ++ else { ++ err("Could not find alternate setting\n"); ++ kfree(ttusbir); ++ return -EINVAL; ++ } ++ ++ /* OK lets setup this interface setting */ ++ usb_set_interface(ttusbir->udev, 0, ttusbir->alt_setting); ++ ++ /* Store device info in interface structure */ ++ usb_set_intfdata(intf, ttusbir); ++ ++ /* Register as a LIRC driver */ ++ if (lirc_buffer_init(&ttusbir->rbuf, sizeof(int), 256) < 0) { ++ err("Could not get memory for LIRC data buffer\n"); ++ usb_set_intfdata(intf, NULL); ++ kfree(ttusbir); ++ return -ENOMEM; ++ } ++ strcpy(ttusbir->driver.name, "TTUSBIR"); ++ ttusbir->driver.minor = -1; ++ ttusbir->driver.code_length = 1; ++ ttusbir->driver.sample_rate = 0; ++ ttusbir->driver.data = ttusbir; ++ ttusbir->driver.add_to_buf = NULL; ++ ttusbir->driver.rbuf = &ttusbir->rbuf; ++ ttusbir->driver.set_use_inc = set_use_inc; ++ ttusbir->driver.set_use_dec = set_use_dec; ++ ttusbir->driver.dev = &intf->dev; ++ ttusbir->driver.owner = 
THIS_MODULE; ++ ttusbir->driver.features = LIRC_CAN_REC_MODE2; ++ ttusbir->minor = lirc_register_driver(&ttusbir->driver); ++ if (ttusbir->minor < 0) { ++ err("Error registering as LIRC driver\n"); ++ usb_set_intfdata(intf, NULL); ++ lirc_buffer_free(&ttusbir->rbuf); ++ kfree(ttusbir); ++ return -EIO; ++ } ++ ++ /* Allocate and setup the URB that we will use to talk to the device */ ++ for (i = 0; i < num_urbs; i++) { ++ ttusbir->urb[i] = usb_alloc_urb(8, GFP_KERNEL); ++ if (!ttusbir->urb[i]) { ++ err("Could not allocate memory for the URB\n"); ++ for (j = i - 1; j >= 0; j--) ++ kfree(ttusbir->urb[j]); ++ lirc_buffer_free(&ttusbir->rbuf); ++ lirc_unregister_driver(ttusbir->minor); ++ kfree(ttusbir); ++ usb_set_intfdata(intf, NULL); ++ return -ENOMEM; ++ } ++ ttusbir->urb[i]->dev = ttusbir->udev; ++ ttusbir->urb[i]->context = ttusbir; ++ ttusbir->urb[i]->pipe = usb_rcvisocpipe(ttusbir->udev, ++ ttusbir->endpoint); ++ ttusbir->urb[i]->interval = 1; ++ ttusbir->urb[i]->transfer_flags = URB_ISO_ASAP; ++ ttusbir->urb[i]->transfer_buffer = &ttusbir->buffer[i][0]; ++ ttusbir->urb[i]->complete = urb_complete; ++ ttusbir->urb[i]->number_of_packets = 8; ++ ttusbir->urb[i]->transfer_buffer_length = 128; ++ for (j = 0; j < 8; j++) { ++ ttusbir->urb[i]->iso_frame_desc[j].offset = j*16; ++ ttusbir->urb[i]->iso_frame_desc[j].length = 16; ++ } ++ } ++ return 0; ++} ++ ++/** ++ * Called when the driver is unloaded or the device is unplugged ++ */ ++static void disconnect(struct usb_interface *intf) ++{ ++ int i; ++ struct ttusbir_device *ttusbir; ++ ++ DPRINTK("Module ttusbir disconnect\n"); ++ ++ ttusbir = (struct ttusbir_device *) usb_get_intfdata(intf); ++ usb_set_intfdata(intf, NULL); ++ lirc_unregister_driver(ttusbir->minor); ++ DPRINTK("unregistered\n"); ++ ++ for (i = 0; i < num_urbs; i++) { ++ usb_kill_urb(ttusbir->urb[i]); ++ usb_free_urb(ttusbir->urb[i]); ++ } ++ DPRINTK("URBs killed\n"); ++ lirc_buffer_free(&ttusbir->rbuf); ++ kfree(ttusbir); ++} ++ ++static int 
ttusbir_init_module(void) ++{ ++ int result; ++ ++ DPRINTK(KERN_DEBUG "Module ttusbir init\n"); ++ ++ /* register this driver with the USB subsystem */ ++ result = usb_register(&usb_driver); ++ if (result) ++ err("usb_register failed. Error number %d", result); ++ return result; ++} ++ ++static void ttusbir_exit_module(void) ++{ ++ printk(KERN_DEBUG "Module ttusbir exit\n"); ++ usb_deregister(&usb_driver); ++} ++ ++module_init(ttusbir_init_module); ++module_exit(ttusbir_exit_module); +diff --git a/drivers/input/lirc/lirc_zilog.c b/drivers/input/lirc/lirc_zilog.c +new file mode 100644 +index 0000000..3a5bc34 +--- /dev/null ++++ b/drivers/input/lirc/lirc_zilog.c +@@ -0,0 +1,1396 @@ ++/* ++ * i2c IR lirc driver for devices with zilog IR processors ++ * ++ * Copyright (c) 2000 Gerd Knorr ++ * modified for PixelView (BT878P+W/FM) by ++ * Michal Kochanowicz ++ * Christoph Bartelmus ++ * modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by ++ * Ulrich Mueller ++ * modified for Asus TV-Box and Creative/VisionTek BreakOut-Box by ++ * Stefan Jahn ++ * modified for inclusion into kernel sources by ++ * Jerome Brock ++ * modified for Leadtek Winfast PVR2000 by ++ * Thomas Reitmayr (treitmayr@yahoo.com) ++ * modified for Hauppauge PVR-150 IR TX device by ++ * Mark Weaver ++ * changed name from lirc_pvr150 to lirc_zilog, works on more than pvr-150 ++ * Jarod Wilson ++ * ++ * parts are cut&pasted from the lirc_i2c.c driver ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "lirc_dev.h" ++#include ++ ++struct IR { ++ struct lirc_driver l; ++ ++ /* Device info */ ++ struct mutex ir_lock; ++ int open; ++ ++ /* RX device */ ++ struct i2c_client c_rx; ++ int have_rx; ++ ++ /* RX device buffer & lock */ ++ struct lirc_buffer buf; ++ struct mutex buf_lock; ++ ++ /* RX polling thread data */ ++ struct completion *t_notify; ++ struct completion *t_notify2; ++ int shutdown; ++ struct task_struct *task; ++ ++ /* RX read data */ ++ unsigned char b[3]; ++ ++ /* TX device */ ++ struct i2c_client c_tx; ++ int need_boot; ++ int have_tx; ++}; ++ ++/* Minor -> data mapping */ ++static struct IR *ir_devices[MAX_IRCTL_DEVICES]; ++ ++/* Block size for IR transmitter */ ++#define TX_BLOCK_SIZE 99 ++ ++/* Hauppauge IR transmitter data */ ++struct tx_data_struct { ++ /* Boot block */ ++ unsigned char *boot_data; ++ ++ /* Start of binary data block */ ++ unsigned char *datap; ++ ++ /* End of binary data block */ ++ unsigned char *endp; ++ ++ /* Number of installed codesets */ ++ unsigned int num_code_sets; ++ ++ /* Pointers to codesets */ ++ unsigned char **code_sets; ++ ++ /* Global fixed data template */ ++ int fixed[TX_BLOCK_SIZE]; ++}; ++ ++static struct tx_data_struct *tx_data; ++static struct mutex tx_data_lock; ++ ++#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \ ++ ## args) ++#define zilog_error(s, args...) 
printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) ++ ++#define ZILOG_HAUPPAUGE_IR_RX_NAME "Zilog/Hauppauge IR RX" ++#define ZILOG_HAUPPAUGE_IR_TX_NAME "Zilog/Hauppauge IR TX" ++ ++/* module parameters */ ++static int debug; /* debug output */ ++static int disable_rx; /* disable RX device */ ++static int disable_tx; /* disable TX device */ ++static int minor = -1; /* minor number */ ++ ++#define dprintk(fmt, args...) \ ++ do { \ ++ if (debug) \ ++ printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \ ++ ## args); \ ++ } while (0) ++ ++static int add_to_buf(struct IR *ir) ++{ ++ __u16 code; ++ unsigned char codes[2]; ++ unsigned char keybuf[6]; ++ int got_data = 0; ++ int ret; ++ int failures = 0; ++ unsigned char sendbuf[1] = { 0 }; ++ ++ if (lirc_buffer_full(&ir->buf)) { ++ dprintk("buffer overflow\n"); ++ return -EOVERFLOW; ++ } ++ ++ /* ++ * service the device as long as it is returning ++ * data and we have space ++ */ ++ do { ++ /* ++ * Lock i2c bus for the duration. RX/TX chips interfere so ++ * this is worth it ++ */ ++ mutex_lock(&ir->ir_lock); ++ ++ /* ++ * Send random "poll command" (?) Windows driver does this ++ * and it is a good point to detect chip failure. 
++ */ ++ ret = i2c_master_send(&ir->c_rx, sendbuf, 1); ++ if (ret != 1) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ if (failures >= 3) { ++ mutex_unlock(&ir->ir_lock); ++ zilog_error("unable to read from the IR chip " ++ "after 3 resets, giving up\n"); ++ return ret; ++ } ++ ++ /* Looks like the chip crashed, reset it */ ++ zilog_error("polling the IR receiver chip failed, " ++ "trying reset\n"); ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_timeout((100 * HZ + 999) / 1000); ++ ir->need_boot = 1; ++ ++ ++failures; ++ mutex_unlock(&ir->ir_lock); ++ continue; ++ } ++ ++ ret = i2c_master_recv(&ir->c_rx, keybuf, sizeof(keybuf)); ++ mutex_unlock(&ir->ir_lock); ++ if (ret != sizeof(keybuf)) { ++ zilog_error("i2c_master_recv failed with %d -- " ++ "keeping last read buffer\n", ret); ++ } else { ++ ir->b[0] = keybuf[3]; ++ ir->b[1] = keybuf[4]; ++ ir->b[2] = keybuf[5]; ++ dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]); ++ } ++ ++ /* key pressed ? */ ++#ifdef I2C_HW_B_HDPVR ++ if (ir->c_rx.adapter->id == I2C_HW_B_HDPVR) { ++ if (got_data && (keybuf[0] == 0x80)) ++ return 0; ++ else if (got_data && (keybuf[0] == 0x00)) ++ return -ENODATA; ++ } else if ((ir->b[0] & 0x80) == 0) ++#else ++ if ((ir->b[0] & 0x80) == 0) ++#endif ++ return got_data ? 0 : -ENODATA; ++ ++ /* look what we have */ ++ code = (((__u16)ir->b[0] & 0x7f) << 6) | (ir->b[1] >> 2); ++ ++ codes[0] = (code >> 8) & 0xff; ++ codes[1] = code & 0xff; ++ ++ /* return it */ ++ lirc_buffer_write(&ir->buf, codes); ++ ++got_data; ++ } while (!lirc_buffer_full(&ir->buf)); ++ ++ return 0; ++} ++ ++/* ++ * Main function of the polling thread -- from lirc_dev. ++ * We don't fit the LIRC model at all anymore. This is horrible, but ++ * basically we have a single RX/TX device with a nasty failure mode ++ * that needs to be accounted for across the pair. lirc lets us provide ++ * fops, but prevents us from using the internal polling, etc. if we do ++ * so. Hence the replication. 
Might be neater to extend the LIRC model ++ * to account for this but I'd think it's a very special case of seriously ++ * messed up hardware. ++ */ ++static int lirc_thread(void *arg) ++{ ++ struct IR *ir = arg; ++ ++ if (ir->t_notify != NULL) ++ complete(ir->t_notify); ++ ++ dprintk("poll thread started\n"); ++ ++ do { ++ if (ir->open) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ /* ++ * This is ~113*2 + 24 + jitter (2*repeat gap + ++ * code length). We use this interval as the chip ++ * resets every time you poll it (bad!). This is ++ * therefore just sufficient to catch all of the ++ * button presses. It makes the remote much more ++ * responsive. You can see the difference by ++ * running irw and holding down a button. With ++ * 100ms, the old polling interval, you'll notice ++ * breaks in the repeat sequence corresponding to ++ * lost keypresses. ++ */ ++ schedule_timeout((260 * HZ) / 1000); ++ if (ir->shutdown) ++ break; ++ if (!add_to_buf(ir)) ++ wake_up_interruptible(&ir->buf.wait_poll); ++ } else { ++ /* if device not opened so we can sleep half a second */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule_timeout(HZ/2); ++ } ++ } while (!ir->shutdown); ++ ++ if (ir->t_notify2 != NULL) ++ wait_for_completion(ir->t_notify2); ++ ++ ir->task = NULL; ++ if (ir->t_notify != NULL) ++ complete(ir->t_notify); ++ ++ dprintk("poll thread ended\n"); ++ return 0; ++} ++ ++static int set_use_inc(void *data) ++{ ++ struct IR *ir = data; ++ ++ if (ir->l.owner == NULL || try_module_get(ir->l.owner) == 0) ++ return -ENODEV; ++ ++ /* lock bttv in memory while /dev/lirc is in use */ ++ /* ++ * this is completely broken code. 
lirc_unregister_driver() ++ * must be possible even when the device is open ++ */ ++ if (ir->c_rx.addr) ++ i2c_use_client(&ir->c_rx); ++ if (ir->c_tx.addr) ++ i2c_use_client(&ir->c_tx); ++ ++ return 0; ++} ++ ++static void set_use_dec(void *data) ++{ ++ struct IR *ir = data; ++ ++ if (ir->c_rx.addr) ++ i2c_release_client(&ir->c_rx); ++ if (ir->c_tx.addr) ++ i2c_release_client(&ir->c_tx); ++ if (ir->l.owner != NULL) ++ module_put(ir->l.owner); ++} ++ ++/* safe read of a uint32 (always network byte order) */ ++static int read_uint32(unsigned char **data, ++ unsigned char *endp, unsigned int *val) ++{ ++ if (*data + 4 > endp) ++ return 0; ++ *val = ((*data)[0] << 24) | ((*data)[1] << 16) | ++ ((*data)[2] << 8) | (*data)[3]; ++ *data += 4; ++ return 1; ++} ++ ++/* safe read of a uint8 */ ++static int read_uint8(unsigned char **data, ++ unsigned char *endp, unsigned char *val) ++{ ++ if (*data + 1 > endp) ++ return 0; ++ *val = *((*data)++); ++ return 1; ++} ++ ++/* safe skipping of N bytes */ ++static int skip(unsigned char **data, ++ unsigned char *endp, unsigned int distance) ++{ ++ if (*data + distance > endp) ++ return 0; ++ *data += distance; ++ return 1; ++} ++ ++/* decompress key data into the given buffer */ ++static int get_key_data(unsigned char *buf, ++ unsigned int codeset, unsigned int key) ++{ ++ unsigned char *data, *endp, *diffs, *key_block; ++ unsigned char keys, ndiffs, id; ++ unsigned int base, lim, pos, i; ++ ++ /* Binary search for the codeset */ ++ for (base = 0, lim = tx_data->num_code_sets; lim; lim >>= 1) { ++ pos = base + (lim >> 1); ++ data = tx_data->code_sets[pos]; ++ ++ if (!read_uint32(&data, tx_data->endp, &i)) ++ goto corrupt; ++ ++ if (i == codeset) ++ break; ++ else if (codeset > i) { ++ base = pos + 1; ++ --lim; ++ } ++ } ++ /* Not found? */ ++ if (!lim) ++ return -EPROTO; ++ ++ /* Set end of data block */ ++ endp = pos < tx_data->num_code_sets - 1 ? 
++ tx_data->code_sets[pos + 1] : tx_data->endp; ++ ++ /* Read the block header */ ++ if (!read_uint8(&data, endp, &keys) || ++ !read_uint8(&data, endp, &ndiffs) || ++ ndiffs > TX_BLOCK_SIZE || keys == 0) ++ goto corrupt; ++ ++ /* Save diffs & skip */ ++ diffs = data; ++ if (!skip(&data, endp, ndiffs)) ++ goto corrupt; ++ ++ /* Read the id of the first key */ ++ if (!read_uint8(&data, endp, &id)) ++ goto corrupt; ++ ++ /* Unpack the first key's data */ ++ for (i = 0; i < TX_BLOCK_SIZE; ++i) { ++ if (tx_data->fixed[i] == -1) { ++ if (!read_uint8(&data, endp, &buf[i])) ++ goto corrupt; ++ } else { ++ buf[i] = (unsigned char)tx_data->fixed[i]; ++ } ++ } ++ ++ /* Early out key found/not found */ ++ if (key == id) ++ return 0; ++ if (keys == 1) ++ return -EPROTO; ++ ++ /* Sanity check */ ++ key_block = data; ++ if (!skip(&data, endp, (keys - 1) * (ndiffs + 1))) ++ goto corrupt; ++ ++ /* Binary search for the key */ ++ for (base = 0, lim = keys - 1; lim; lim >>= 1) { ++ /* Seek to block */ ++ unsigned char *key_data; ++ pos = base + (lim >> 1); ++ key_data = key_block + (ndiffs + 1) * pos; ++ ++ if (*key_data == key) { ++ /* skip key id */ ++ ++key_data; ++ ++ /* found, so unpack the diffs */ ++ for (i = 0; i < ndiffs; ++i) { ++ unsigned char val; ++ if (!read_uint8(&key_data, endp, &val) || ++ diffs[i] >= TX_BLOCK_SIZE) ++ goto corrupt; ++ buf[diffs[i]] = val; ++ } ++ ++ return 0; ++ } else if (key > *key_data) { ++ base = pos + 1; ++ --lim; ++ } ++ } ++ /* Key not found */ ++ return -EPROTO; ++ ++corrupt: ++ zilog_error("firmware is corrupt\n"); ++ return -EFAULT; ++} ++ ++/* send a block of data to the IR TX device */ ++static int send_data_block(struct IR *ir, unsigned char *data_block) ++{ ++ int i, j, ret; ++ unsigned char buf[5]; ++ ++ for (i = 0; i < TX_BLOCK_SIZE;) { ++ int tosend = TX_BLOCK_SIZE - i; ++ if (tosend > 4) ++ tosend = 4; ++ buf[0] = (unsigned char)(i + 1); ++ for (j = 0; j < tosend; ++j) ++ buf[1 + j] = data_block[i + j]; ++ dprintk("%02x %02x %02x 
%02x %02x", ++ buf[0], buf[1], buf[2], buf[3], buf[4]); ++ ret = i2c_master_send(&ir->c_tx, buf, tosend + 1); ++ if (ret != tosend + 1) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ i += tosend; ++ } ++ return 0; ++} ++ ++/* send boot data to the IR TX device */ ++static int send_boot_data(struct IR *ir) ++{ ++ int ret; ++ unsigned char buf[4]; ++ ++ /* send the boot block */ ++ ret = send_data_block(ir, tx_data->boot_data); ++ if (ret != 0) ++ return ret; ++ ++ /* kick it off? */ ++ buf[0] = 0x00; ++ buf[1] = 0x20; ++ ret = i2c_master_send(&ir->c_tx, buf, 2); ++ if (ret != 2) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ret = i2c_master_send(&ir->c_tx, buf, 1); ++ if (ret != 1) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++ /* Here comes the firmware version... (hopefully) */ ++ ret = i2c_master_recv(&ir->c_tx, buf, 4); ++ if (ret != 4) { ++ zilog_error("i2c_master_recv failed with %d\n", ret); ++ return 0; ++ } ++ if (buf[0] != 0x80) { ++ zilog_error("unexpected IR TX response: %02x\n", buf[0]); ++ return 0; ++ } ++ zilog_notify("Zilog/Hauppauge IR blaster firmware version " ++ "%d.%d.%d loaded\n", buf[1], buf[2], buf[3]); ++ ++ return 0; ++} ++ ++/* unload "firmware", lock held */ ++static void fw_unload_locked(void) ++{ ++ if (tx_data) { ++ if (tx_data->code_sets) ++ vfree(tx_data->code_sets); ++ ++ if (tx_data->datap) ++ vfree(tx_data->datap); ++ ++ vfree(tx_data); ++ tx_data = NULL; ++ dprintk("successfully unloaded IR blaster firmware\n"); ++ } ++} ++ ++/* unload "firmware" for the IR TX device */ ++static void fw_unload(void) ++{ ++ mutex_lock(&tx_data_lock); ++ fw_unload_locked(); ++ mutex_unlock(&tx_data_lock); ++} ++ ++/* load "firmware" for the IR TX device */ ++static int fw_load(struct IR *ir) ++{ ++ int ret; ++ unsigned int i; ++ unsigned char *data, version, num_global_fixed; ++ 
const struct firmware *fw_entry; ++ ++ /* Already loaded? */ ++ mutex_lock(&tx_data_lock); ++ if (tx_data) { ++ ret = 0; ++ goto out; ++ } ++ ++ /* Request codeset data file */ ++ ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &ir->c_tx.dev); ++ if (ret != 0) { ++ zilog_error("firmware haup-ir-blaster.bin not available " ++ "(%d)\n", ret); ++ ret = ret < 0 ? ret : -EFAULT; ++ goto out; ++ } ++ dprintk("firmware of size %zu loaded\n", fw_entry->size); ++ ++ /* Parse the file */ ++ tx_data = vmalloc(sizeof(*tx_data)); ++ if (tx_data == NULL) { ++ zilog_error("out of memory\n"); ++ release_firmware(fw_entry); ++ ret = -ENOMEM; ++ goto out; ++ } ++ tx_data->code_sets = NULL; ++ ++ /* Copy the data so hotplug doesn't get confused and timeout */ ++ tx_data->datap = vmalloc(fw_entry->size); ++ if (tx_data->datap == NULL) { ++ zilog_error("out of memory\n"); ++ release_firmware(fw_entry); ++ vfree(tx_data); ++ ret = -ENOMEM; ++ goto out; ++ } ++ memcpy(tx_data->datap, fw_entry->data, fw_entry->size); ++ tx_data->endp = tx_data->datap + fw_entry->size; ++ release_firmware(fw_entry); fw_entry = NULL; ++ ++ /* Check version */ ++ data = tx_data->datap; ++ if (!read_uint8(&data, tx_data->endp, &version)) ++ goto corrupt; ++ if (version != 1) { ++ zilog_error("unsupported code set file version (%u, expected" ++ "1) -- please upgrade to a newer driver", ++ version); ++ fw_unload_locked(); ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ /* Save boot block for later */ ++ tx_data->boot_data = data; ++ if (!skip(&data, tx_data->endp, TX_BLOCK_SIZE)) ++ goto corrupt; ++ ++ if (!read_uint32(&data, tx_data->endp, ++ &tx_data->num_code_sets)) ++ goto corrupt; ++ ++ dprintk("%u IR blaster codesets loaded\n", tx_data->num_code_sets); ++ ++ tx_data->code_sets = vmalloc( ++ tx_data->num_code_sets * sizeof(char *)); ++ if (tx_data->code_sets == NULL) { ++ fw_unload_locked(); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ for (i = 0; i < TX_BLOCK_SIZE; ++i) ++ tx_data->fixed[i] = -1; ++ ++ /* 
Read global fixed data template */ ++ if (!read_uint8(&data, tx_data->endp, &num_global_fixed) || ++ num_global_fixed > TX_BLOCK_SIZE) ++ goto corrupt; ++ for (i = 0; i < num_global_fixed; ++i) { ++ unsigned char pos, val; ++ if (!read_uint8(&data, tx_data->endp, &pos) || ++ !read_uint8(&data, tx_data->endp, &val) || ++ pos >= TX_BLOCK_SIZE) ++ goto corrupt; ++ tx_data->fixed[pos] = (int)val; ++ } ++ ++ /* Filch out the position of each code set */ ++ for (i = 0; i < tx_data->num_code_sets; ++i) { ++ unsigned int id; ++ unsigned char keys; ++ unsigned char ndiffs; ++ ++ /* Save the codeset position */ ++ tx_data->code_sets[i] = data; ++ ++ /* Read header */ ++ if (!read_uint32(&data, tx_data->endp, &id) || ++ !read_uint8(&data, tx_data->endp, &keys) || ++ !read_uint8(&data, tx_data->endp, &ndiffs) || ++ ndiffs > TX_BLOCK_SIZE || keys == 0) ++ goto corrupt; ++ ++ /* skip diff positions */ ++ if (!skip(&data, tx_data->endp, ndiffs)) ++ goto corrupt; ++ ++ /* ++ * After the diffs we have the first key id + data - ++ * global fixed ++ */ ++ if (!skip(&data, tx_data->endp, ++ 1 + TX_BLOCK_SIZE - num_global_fixed)) ++ goto corrupt; ++ ++ /* Then we have keys-1 blocks of key id+diffs */ ++ if (!skip(&data, tx_data->endp, ++ (ndiffs + 1) * (keys - 1))) ++ goto corrupt; ++ } ++ ret = 0; ++ goto out; ++ ++corrupt: ++ zilog_error("firmware is corrupt\n"); ++ fw_unload_locked(); ++ ret = -EFAULT; ++ ++out: ++ mutex_unlock(&tx_data_lock); ++ return ret; ++} ++ ++/* initialise the IR TX device */ ++static int tx_init(struct IR *ir) ++{ ++ int ret; ++ ++ /* Load 'firmware' */ ++ ret = fw_load(ir); ++ if (ret != 0) ++ return ret; ++ ++ /* Send boot block */ ++ ret = send_boot_data(ir); ++ if (ret != 0) ++ return ret; ++ ir->need_boot = 0; ++ ++ /* Looks good */ ++ return 0; ++} ++ ++/* do nothing stub to make LIRC happy */ ++static loff_t lseek(struct file *filep, loff_t offset, int orig) ++{ ++ return -ESPIPE; ++} ++ ++/* copied from lirc_dev */ ++static ssize_t read(struct file 
*filep, char *outbuf, size_t n, loff_t *ppos) ++{ ++ struct IR *ir = (struct IR *)filep->private_data; ++ unsigned char buf[ir->buf.chunk_size]; ++ int ret = 0, written = 0; ++ DECLARE_WAITQUEUE(wait, current); ++ ++ dprintk("read called\n"); ++ if (ir->c_rx.addr == 0) ++ return -ENODEV; ++ ++ if (mutex_lock_interruptible(&ir->buf_lock)) ++ return -ERESTARTSYS; ++ ++ if (n % ir->buf.chunk_size) { ++ dprintk("read result = -EINVAL\n"); ++ mutex_unlock(&ir->buf_lock); ++ return -EINVAL; ++ } ++ ++ /* ++ * we add ourselves to the task queue before buffer check ++ * to avoid losing scan code (in case when queue is awaken somewhere ++ * between while condition checking and scheduling) ++ */ ++ add_wait_queue(&ir->buf.wait_poll, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ /* ++ * while we didn't provide 'length' bytes, device is opened in blocking ++ * mode and 'copy_to_user' is happy, wait for data. ++ */ ++ while (written < n && ret == 0) { ++ if (lirc_buffer_empty(&ir->buf)) { ++ /* ++ * According to the read(2) man page, 'written' can be ++ * returned as less than 'n', instead of blocking ++ * again, returning -EWOULDBLOCK, or returning ++ * -ERESTARTSYS ++ */ ++ if (written) ++ break; ++ if (filep->f_flags & O_NONBLOCK) { ++ ret = -EWOULDBLOCK; ++ break; ++ } ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } else { ++ lirc_buffer_read(&ir->buf, buf); ++ ret = copy_to_user((void *)outbuf+written, buf, ++ ir->buf.chunk_size); ++ written += ir->buf.chunk_size; ++ } ++ } ++ ++ remove_wait_queue(&ir->buf.wait_poll, &wait); ++ set_current_state(TASK_RUNNING); ++ mutex_unlock(&ir->buf_lock); ++ ++ dprintk("read result = %s (%d)\n", ++ ret ? "-EFAULT" : "OK", ret); ++ ++ return ret ? 
ret : written; ++} ++ ++/* send a keypress to the IR TX device */ ++static int send_code(struct IR *ir, unsigned int code, unsigned int key) ++{ ++ unsigned char data_block[TX_BLOCK_SIZE]; ++ unsigned char buf[2]; ++ int i, ret; ++ ++ /* Get data for the codeset/key */ ++ ret = get_key_data(data_block, code, key); ++ ++ if (ret == -EPROTO) { ++ zilog_error("failed to get data for code %u, key %u -- check " ++ "lircd.conf entries\n", code, key); ++ return ret; ++ } else if (ret != 0) ++ return ret; ++ ++ /* Send the data block */ ++ ret = send_data_block(ir, data_block); ++ if (ret != 0) ++ return ret; ++ ++ /* Send data block length? */ ++ buf[0] = 0x00; ++ buf[1] = 0x40; ++ ret = i2c_master_send(&ir->c_tx, buf, 2); ++ if (ret != 2) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ret = i2c_master_send(&ir->c_tx, buf, 1); ++ if (ret != 1) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++ /* Send finished download? */ ++ ret = i2c_master_recv(&ir->c_tx, buf, 1); ++ if (ret != 1) { ++ zilog_error("i2c_master_recv failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ if (buf[0] != 0xA0) { ++ zilog_error("unexpected IR TX response #1: %02x\n", ++ buf[0]); ++ return -EFAULT; ++ } ++ ++ /* Send prepare command? */ ++ buf[0] = 0x00; ++ buf[1] = 0x80; ++ ret = i2c_master_send(&ir->c_tx, buf, 2); ++ if (ret != 2) { ++ zilog_error("i2c_master_send failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++#ifdef I2C_HW_B_HDPVR ++ /* ++ * The sleep bits aren't necessary on the HD PVR, and in fact, the ++ * last i2c_master_recv always fails with a -5, so for now, we're ++ * going to skip this whole mess and say we're done on the HD PVR ++ */ ++ if (ir->c_rx.adapter->id == I2C_HW_B_HDPVR) ++ goto done; ++#endif ++ ++ /* ++ * This bit NAKs until the device is ready, so we retry it ++ * sleeping a bit each time. 
This seems to be what the windows ++ * driver does, approximately. ++ * Try for up to 1s. ++ */ ++ for (i = 0; i < 20; ++i) { ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_timeout((50 * HZ + 999) / 1000); ++ ret = i2c_master_send(&ir->c_tx, buf, 1); ++ if (ret == 1) ++ break; ++ dprintk("NAK expected: i2c_master_send " ++ "failed with %d (try %d)\n", ret, i+1); ++ } ++ if (ret != 1) { ++ zilog_error("IR TX chip never got ready: last i2c_master_send " ++ "failed with %d\n", ret); ++ return ret < 0 ? ret : -EFAULT; ++ } ++ ++ /* Seems to be an 'ok' response */ ++ i = i2c_master_recv(&ir->c_tx, buf, 1); ++ if (i != 1) { ++ zilog_error("i2c_master_recv failed with %d\n", ret); ++ return -EFAULT; ++ } ++ if (buf[0] != 0x80) { ++ zilog_error("unexpected IR TX response #2: %02x\n", buf[0]); ++ return -EFAULT; ++ } ++ ++done: ++ /* Oh good, it worked */ ++ dprintk("sent code %u, key %u\n", code, key); ++ return 0; ++} ++ ++/* ++ * Write a code to the device. We take in a 32-bit number (an int) and then ++ * decode this to a codeset/key index. The key data is then decompressed and ++ * sent to the device. We have a spin lock as per i2c documentation to prevent ++ * multiple concurrent sends which would probably cause the device to explode. 
++ */ ++static ssize_t write(struct file *filep, const char *buf, size_t n, ++ loff_t *ppos) ++{ ++ struct IR *ir = (struct IR *)filep->private_data; ++ size_t i; ++ int failures = 0; ++ ++ if (ir->c_tx.addr == 0) ++ return -ENODEV; ++ ++ /* Validate user parameters */ ++ if (n % sizeof(int)) ++ return -EINVAL; ++ ++ /* Lock i2c bus for the duration */ ++ mutex_lock(&ir->ir_lock); ++ ++ /* Send each keypress */ ++ for (i = 0; i < n;) { ++ int ret = 0; ++ int command; ++ ++ if (copy_from_user(&command, buf + i, sizeof(command))) { ++ mutex_unlock(&ir->ir_lock); ++ return -EFAULT; ++ } ++ ++ /* Send boot data first if required */ ++ if (ir->need_boot == 1) { ++ ret = send_boot_data(ir); ++ if (ret == 0) ++ ir->need_boot = 0; ++ } ++ ++ /* Send the code */ ++ if (ret == 0) { ++ ret = send_code(ir, (unsigned)command >> 16, ++ (unsigned)command & 0xFFFF); ++ if (ret == -EPROTO) { ++ mutex_unlock(&ir->ir_lock); ++ return ret; ++ } ++ } ++ ++ /* ++ * Hmm, a failure. If we've had a few then give up, otherwise ++ * try a reset ++ */ ++ if (ret != 0) { ++ /* Looks like the chip crashed, reset it */ ++ zilog_error("sending to the IR transmitter chip " ++ "failed, trying reset\n"); ++ ++ if (failures >= 3) { ++ zilog_error("unable to send to the IR chip " ++ "after 3 resets, giving up\n"); ++ mutex_unlock(&ir->ir_lock); ++ return ret; ++ } ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_timeout((100 * HZ + 999) / 1000); ++ ir->need_boot = 1; ++ ++failures; ++ } else ++ i += sizeof(int); ++ } ++ ++ /* Release i2c bus */ ++ mutex_unlock(&ir->ir_lock); ++ ++ /* All looks good */ ++ return n; ++} ++ ++/* copied from lirc_dev */ ++static unsigned int poll(struct file *filep, poll_table *wait) ++{ ++ struct IR *ir = (struct IR *)filep->private_data; ++ unsigned int ret; ++ ++ dprintk("poll called\n"); ++ if (ir->c_rx.addr == 0) ++ return -ENODEV; ++ ++ mutex_lock(&ir->buf_lock); ++ ++ poll_wait(filep, &ir->buf.wait_poll, wait); ++ ++ dprintk("poll result = %s\n", ++ 
lirc_buffer_empty(&ir->buf) ? "0" : "POLLIN|POLLRDNORM"); ++ ++ ret = lirc_buffer_empty(&ir->buf) ? 0 : (POLLIN|POLLRDNORM); ++ ++ mutex_unlock(&ir->buf_lock); ++ return ret; ++} ++ ++static int ioctl(struct inode *node, struct file *filep, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct IR *ir = (struct IR *)filep->private_data; ++ int result; ++ unsigned long mode, features = 0; ++ ++ if (ir->c_rx.addr != 0) ++ features |= LIRC_CAN_REC_LIRCCODE; ++ if (ir->c_tx.addr != 0) ++ features |= LIRC_CAN_SEND_PULSE; ++ ++ switch (cmd) { ++ case LIRC_GET_LENGTH: ++ result = put_user((unsigned long)13, ++ (unsigned long *)arg); ++ break; ++ case LIRC_GET_FEATURES: ++ result = put_user(features, (unsigned long *) arg); ++ break; ++ case LIRC_GET_REC_MODE: ++ if (!(features&LIRC_CAN_REC_MASK)) ++ return -ENOSYS; ++ ++ result = put_user(LIRC_REC2MODE ++ (features&LIRC_CAN_REC_MASK), ++ (unsigned long *)arg); ++ break; ++ case LIRC_SET_REC_MODE: ++ if (!(features&LIRC_CAN_REC_MASK)) ++ return -ENOSYS; ++ ++ result = get_user(mode, (unsigned long *)arg); ++ if (!result && !(LIRC_MODE2REC(mode) & features)) ++ result = -EINVAL; ++ break; ++ case LIRC_GET_SEND_MODE: ++ if (!(features&LIRC_CAN_SEND_MASK)) ++ return -ENOSYS; ++ ++ result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg); ++ break; ++ case LIRC_SET_SEND_MODE: ++ if (!(features&LIRC_CAN_SEND_MASK)) ++ return -ENOSYS; ++ ++ result = get_user(mode, (unsigned long *) arg); ++ if (!result && mode != LIRC_MODE_PULSE) ++ return -EINVAL; ++ break; ++ default: ++ return -EINVAL; ++ } ++ return result; ++} ++ ++/* ++ * Open the IR device. 
Get hold of our IR structure and ++ * stash it in private_data for the file ++ */ ++static int open(struct inode *node, struct file *filep) ++{ ++ struct IR *ir; ++ int ret; ++ ++ /* find our IR struct */ ++ unsigned minor = MINOR(node->i_rdev); ++ if (minor >= MAX_IRCTL_DEVICES) { ++ dprintk("minor %d: open result = -ENODEV\n", ++ minor); ++ return -ENODEV; ++ } ++ ir = ir_devices[minor]; ++ ++ /* increment in use count */ ++ mutex_lock(&ir->ir_lock); ++ ++ir->open; ++ ret = set_use_inc(ir); ++ if (ret != 0) { ++ --ir->open; ++ mutex_unlock(&ir->ir_lock); ++ return ret; ++ } ++ mutex_unlock(&ir->ir_lock); ++ ++ /* stash our IR struct */ ++ filep->private_data = ir; ++ ++ return 0; ++} ++ ++/* Close the IR device */ ++static int close(struct inode *node, struct file *filep) ++{ ++ /* find our IR struct */ ++ struct IR *ir = (struct IR *)filep->private_data; ++ if (ir == NULL) { ++ zilog_error("close: no private_data attached to the file!\n"); ++ return -ENODEV; ++ } ++ ++ /* decrement in use count */ ++ mutex_lock(&ir->ir_lock); ++ --ir->open; ++ set_use_dec(ir); ++ mutex_unlock(&ir->ir_lock); ++ ++ return 0; ++} ++ ++static struct lirc_driver lirc_template = { ++ .name = "lirc_zilog", ++ .set_use_inc = set_use_inc, ++ .set_use_dec = set_use_dec, ++ .owner = THIS_MODULE ++}; ++ ++static int ir_remove(struct i2c_client *client); ++static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id); ++static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg); ++ ++static const struct i2c_device_id ir_transceiver_id[] = { ++ /* Generic entry for any IR transceiver */ ++ { "ir_video", 0 }, ++ /* IR device specific entries should be added here */ ++ { "ir_tx_z8f0811_haup", 0 }, ++ { "ir_rx_z8f0811_haup", 0 }, ++ { } ++}; ++ ++static struct i2c_driver driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "Zilog/Hauppauge i2c IR", ++ }, ++ .probe = ir_probe, ++ .remove = ir_remove, ++ .command = ir_command, ++ .id_table = 
ir_transceiver_id, ++}; ++ ++static struct file_operations lirc_fops = { ++ .owner = THIS_MODULE, ++ .llseek = lseek, ++ .read = read, ++ .write = write, ++ .poll = poll, ++ .ioctl = ioctl, ++ .open = open, ++ .release = close ++}; ++ ++static int ir_remove(struct i2c_client *client) ++{ ++ struct IR *ir = i2c_get_clientdata(client); ++ ++ mutex_lock(&ir->ir_lock); ++ ++ if (ir->have_rx || ir->have_tx) { ++ DECLARE_COMPLETION(tn); ++ DECLARE_COMPLETION(tn2); ++ ++ /* end up polling thread */ ++ if (ir->task && !IS_ERR(ir->task)) { ++ ir->t_notify = &tn; ++ ir->t_notify2 = &tn2; ++ ir->shutdown = 1; ++ wake_up_process(ir->task); ++ complete(&tn2); ++ wait_for_completion(&tn); ++ ir->t_notify = NULL; ++ ir->t_notify2 = NULL; ++ } ++ ++ } else { ++ mutex_unlock(&ir->ir_lock); ++ zilog_error("%s: detached from something we didn't " ++ "attach to\n", __func__); ++ return -ENODEV; ++ } ++ ++ /* unregister lirc driver */ ++ if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) { ++ lirc_unregister_driver(ir->l.minor); ++ ir_devices[ir->l.minor] = NULL; ++ } ++ ++ /* free memory */ ++ lirc_buffer_free(&ir->buf); ++ mutex_unlock(&ir->ir_lock); ++ kfree(ir); ++ ++ return 0; ++} ++ ++static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) ++{ ++ struct IR *ir = NULL; ++ struct i2c_adapter *adap = client->adapter; ++ char buf; ++ int ret; ++ int have_rx = 0, have_tx = 0; ++ ++ dprintk("%s: adapter id=0x%x, client addr=0x%02x\n", ++ __func__, adap->id, client->addr); ++ ++ /* if this isn't an appropriate device, bail w/-ENODEV now */ ++ if (!(adap->id == I2C_HW_B_BT848 || ++#ifdef I2C_HW_B_HDPVR ++ adap->id == I2C_HW_B_HDPVR || ++#endif ++ adap->id == I2C_HW_B_CX2341X)) ++ goto out_nodev; ++ ++ /* ++ * The external IR receiver is at i2c address 0x71. ++ * The IR transmitter is at 0x70. 
++ */ ++ client->addr = 0x70; ++ ++ if (!disable_tx) { ++ if (i2c_master_recv(client, &buf, 1) == 1) ++ have_tx = 1; ++ dprintk("probe 0x70 @ %s: %s\n", ++ adap->name, have_tx ? "success" : "failed"); ++ } ++ ++ if (!disable_rx) { ++ client->addr = 0x71; ++ if (i2c_master_recv(client, &buf, 1) == 1) ++ have_rx = 1; ++ dprintk("probe 0x71 @ %s: %s\n", ++ adap->name, have_rx ? "success" : "failed"); ++ } ++ ++ if (!(have_rx || have_tx)) { ++ zilog_error("%s: no devices found\n", adap->name); ++ goto out_nodev; ++ } ++ ++ printk(KERN_INFO "lirc_zilog: chip found with %s\n", ++ have_rx && have_tx ? "RX and TX" : ++ have_rx ? "RX only" : "TX only"); ++ ++ ir = kzalloc(sizeof(struct IR), GFP_KERNEL); ++ ++ if (!ir) ++ goto out_nomem; ++ ++ ret = lirc_buffer_init(&ir->buf, 2, BUFLEN / 2); ++ if (ret) ++ goto out_nomem; ++ ++ mutex_init(&ir->ir_lock); ++ mutex_init(&ir->buf_lock); ++ ir->need_boot = 1; ++ ++ memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver)); ++ ir->l.minor = -1; ++ ++ /* I2C attach to device */ ++ i2c_set_clientdata(client, ir); ++ ++ /* initialise RX device */ ++ if (have_rx) { ++ DECLARE_COMPLETION(tn); ++ memcpy(&ir->c_rx, client, sizeof(struct i2c_client)); ++ ++ ir->c_rx.addr = 0x71; ++ strncpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME, ++ I2C_NAME_SIZE); ++ ++ /* try to fire up polling thread */ ++ ir->t_notify = &tn; ++ ir->task = kthread_run(lirc_thread, ir, "lirc_zilog"); ++ if (IS_ERR(ir->task)) { ++ ret = PTR_ERR(ir->task); ++ zilog_error("lirc_register_driver: cannot run " ++ "poll thread %d\n", ret); ++ goto err; ++ } ++ wait_for_completion(&tn); ++ ir->t_notify = NULL; ++ ir->have_rx = 1; ++ } ++ ++ /* initialise TX device */ ++ if (have_tx) { ++ memcpy(&ir->c_tx, client, sizeof(struct i2c_client)); ++ ir->c_tx.addr = 0x70; ++ strncpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME, ++ I2C_NAME_SIZE); ++ ir->have_tx = 1; ++ } ++ ++ /* set lirc_dev stuff */ ++ ir->l.code_length = 13; ++ ir->l.rbuf = &ir->buf; ++ ir->l.fops = &lirc_fops; ++ 
ir->l.data = ir; ++ ir->l.minor = minor; ++ ir->l.dev = &adap->dev; ++ ir->l.sample_rate = 0; ++ ++ /* register with lirc */ ++ ir->l.minor = lirc_register_driver(&ir->l); ++ if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) { ++ zilog_error("ir_attach: \"minor\" must be between 0 and %d " ++ "(%d)!\n", MAX_IRCTL_DEVICES-1, ir->l.minor); ++ ret = -EBADRQC; ++ goto err; ++ } ++ ++ /* store this for getting back in open() later on */ ++ ir_devices[ir->l.minor] = ir; ++ ++ /* ++ * if we have the tx device, load the 'firmware'. We do this ++ * after registering with lirc as otherwise hotplug seems to take ++ * 10s to create the lirc device. ++ */ ++ if (have_tx) { ++ /* Special TX init */ ++ ret = tx_init(ir); ++ if (ret != 0) ++ goto err; ++ } ++ ++ return 0; ++ ++err: ++ /* undo everything, hopefully... */ ++ if (ir->c_rx.addr) ++ ir_remove(&ir->c_rx); ++ if (ir->c_tx.addr) ++ ir_remove(&ir->c_tx); ++ return ret; ++ ++out_nodev: ++ zilog_error("no device found\n"); ++ return -ENODEV; ++ ++out_nomem: ++ zilog_error("memory allocation failure\n"); ++ kfree(ir); ++ return -ENOMEM; ++} ++ ++static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg) ++{ ++ /* nothing */ ++ return 0; ++} ++ ++static int __init zilog_init(void) ++{ ++ int ret; ++ ++ zilog_notify("Zilog/Hauppauge IR driver initializing\n"); ++ ++ mutex_init(&tx_data_lock); ++ ++ request_module("firmware_class"); ++ ++ ret = i2c_add_driver(&driver); ++ if (ret) ++ zilog_error("initialization failed\n"); ++ else ++ zilog_notify("initialization complete\n"); ++ ++ return ret; ++} ++ ++static void __exit zilog_exit(void) ++{ ++ i2c_del_driver(&driver); ++ /* if loaded */ ++ fw_unload(); ++ zilog_notify("Zilog/Hauppauge IR driver unloaded\n"); ++} ++ ++module_init(zilog_init); ++module_exit(zilog_exit); ++ ++MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)"); ++MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, " ++ "Ulrich Mueller, Stefan 
Jahn, Jerome Brock, Mark Weaver"); ++MODULE_LICENSE("GPL"); ++/* for compat with old name, which isn't all that accurate anymore */ ++MODULE_ALIAS("lirc_pvr150"); ++ ++module_param(minor, int, 0444); ++MODULE_PARM_DESC(minor, "Preferred minor device number"); ++ ++module_param(debug, bool, 0644); ++MODULE_PARM_DESC(debug, "Enable debugging messages"); ++ ++module_param(disable_rx, bool, 0644); ++MODULE_PARM_DESC(disable_rx, "Disable the IR receiver device"); ++ ++module_param(disable_tx, bool, 0644); ++MODULE_PARM_DESC(disable_tx, "Disable the IR transmitter device"); +diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig +index 16ec523..1196110 100644 +--- a/drivers/input/misc/Kconfig ++++ b/drivers/input/misc/Kconfig +@@ -319,4 +319,16 @@ config INPUT_PCAP + To compile this driver as a module, choose M here: the + module will be called pcap_keys. + ++config INPUT_IMON ++ tristate "SoundGraph iMON Receiver and Display" ++ depends on USB_ARCH_HAS_HCD ++ select USB ++ select INPUT_SPARSEKMAP ++ help ++ Say Y here if you want to use a SoundGraph iMON (aka Antec Veris) ++ IR Receiver and/or LCD/VFD/VGA display. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called imon. 
++ + endif +diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile +index a8b8485..79358ff 100644 +--- a/drivers/input/misc/Makefile ++++ b/drivers/input/misc/Makefile +@@ -13,6 +13,7 @@ obj-$(CONFIG_INPUT_CM109) += cm109.o + obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o + obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o + obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o ++obj-$(CONFIG_INPUT_IMON) += imon.o + obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o + obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o + obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o +diff --git a/drivers/input/misc/imon.c b/drivers/input/misc/imon.c +new file mode 100644 +index 0000000..71223e2 +--- /dev/null ++++ b/drivers/input/misc/imon.c +@@ -0,0 +1,2430 @@ ++/* ++ * imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD ++ * ++ * Copyright(C) 2009 Jarod Wilson ++ * Portions based on the original lirc_imon driver, ++ * Copyright(C) 2004 Venky Raju(dev@venky.ws) ++ * ++ * imon is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define MOD_AUTHOR "Jarod Wilson " ++#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" ++#define MOD_NAME "imon" ++#define MOD_VERSION "0.8" ++ ++#define DISPLAY_MINOR_BASE 144 ++#define DEVICE_NAME "lcd%d" ++ ++#define BUF_CHUNK_SIZE 8 ++#define BUF_SIZE 128 ++ ++#define BIT_DURATION 250 /* each bit received is 250us */ ++ ++#define IMON_CLOCK_ENABLE_PACKETS 2 ++#define IMON_KEY_RELEASE_OFFSET 1000 ++ ++/*** P R O T O T Y P E S ***/ ++ ++/* USB Callback prototypes */ ++static int imon_probe(struct usb_interface *interface, ++ const struct usb_device_id *id); ++static void imon_disconnect(struct usb_interface *interface); ++static void usb_rx_callback_intf0(struct urb *urb); ++static void usb_rx_callback_intf1(struct urb *urb); ++static void usb_tx_callback(struct urb *urb); ++ ++/* suspend/resume support */ ++static int imon_resume(struct usb_interface *intf); ++static int imon_suspend(struct usb_interface *intf, pm_message_t message); ++ ++/* Display file_operations function prototypes */ ++static int display_open(struct inode *inode, struct file *file); ++static int display_close(struct inode *inode, struct file *file); ++ ++/* VFD write operation */ ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos); ++ ++/* LCD file_operations override function prototypes */ ++static ssize_t lcd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos); ++ ++/*** G L O B A L S ***/ ++ ++struct imon_context { ++ struct device *dev; ++ struct usb_device *usbdev_intf0; ++ /* Newer devices have two interfaces */ ++ struct usb_device *usbdev_intf1; ++ bool display_supported; /* not all controllers do */ ++ bool display_isopen; /* display port has been opened */ ++ bool ir_isassociating; /* IR port open for association */ ++ bool dev_present_intf0; /* USB device 
presence, interface 0 */ ++ bool dev_present_intf1; /* USB device presence, interface 1 */ ++ struct mutex lock; /* to lock this object */ ++ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ ++ ++ struct usb_endpoint_descriptor *rx_endpoint_intf0; ++ struct usb_endpoint_descriptor *rx_endpoint_intf1; ++ struct usb_endpoint_descriptor *tx_endpoint; ++ struct urb *rx_urb_intf0; ++ struct urb *rx_urb_intf1; ++ struct urb *tx_urb; ++ bool tx_control; ++ unsigned char usb_rx_buf[8]; ++ unsigned char usb_tx_buf[8]; ++ ++ struct tx_t { ++ unsigned char data_buf[35]; /* user data buffer */ ++ struct completion finished; /* wait for write to finish */ ++ bool busy; /* write in progress */ ++ int status; /* status of tx completion */ ++ } tx; ++ ++ u16 vendor; /* usb vendor ID */ ++ u16 product; /* usb product ID */ ++ int ir_protocol; /* iMON or MCE (RC6) IR protocol? */ ++ struct input_dev *idev; /* input device for remote */ ++ struct input_dev *touch; /* input device for touchscreen */ ++ int ki; /* current input keycode key index */ ++ u16 kc; /* current input keycode */ ++ u16 last_keycode; /* last reported input keycode */ ++ u8 mce_toggle_bit; /* last mce toggle bit */ ++ int display_type; /* store the display type */ ++ bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */ ++ int touch_x; /* x coordinate on touchscreen */ ++ int touch_y; /* y coordinate on touchscreen */ ++ char name_idev[128]; /* input device name */ ++ char phys_idev[64]; /* input device phys path */ ++ struct timer_list itimer; /* input device timer, need for rc6 */ ++ char name_touch[128]; /* touch screen name */ ++ char phys_touch[64]; /* touch screen phys path */ ++ struct timer_list ttimer; /* touch screen timer */ ++}; ++ ++#define TOUCH_TIMEOUT (HZ/30) ++#define MCE_TIMEOUT_MS 200 ++ ++/* vfd character device file operations */ ++static const struct file_operations vfd_fops = { ++ .owner = THIS_MODULE, ++ .open = &display_open, ++ .write = &vfd_write, ++ .release = 
&display_close ++}; ++ ++/* lcd character device file operations */ ++static const struct file_operations lcd_fops = { ++ .owner = THIS_MODULE, ++ .open = &display_open, ++ .write = &lcd_write, ++ .release = &display_close ++}; ++ ++enum { ++ IMON_DISPLAY_TYPE_AUTO = 0, ++ IMON_DISPLAY_TYPE_VFD = 1, ++ IMON_DISPLAY_TYPE_LCD = 2, ++ IMON_DISPLAY_TYPE_VGA = 3, ++ IMON_DISPLAY_TYPE_NONE = 4, ++}; ++ ++enum { ++ IMON_IR_PROTOCOL_IMON = 0, ++ IMON_IR_PROTOCOL_MCE = 1, ++ IMON_IR_PROTOCOL_IMON_NOPAD = 2, ++}; ++ ++enum { ++ IMON_BUTTON_IMON = 0, ++ IMON_BUTTON_MCE = 1, ++ IMON_BUTTON_PANEL = 2, ++}; ++ ++/* ++ * USB Device ID for iMON USB Control Boards ++ * ++ * The Windows drivers contain 6 different inf files, more or less one for ++ * each new device until the 0x0034-0x0046 devices, which all use the same ++ * driver. Some of the devices in the 34-46 range haven't been definitively ++ * identified yet. Early devices have either a TriGem Computer, Inc. or a ++ * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later ++ * devices use the SoundGraph vendor ID (0x15c2). This driver only supports ++ * the ffdc and later devices, which do onboard decoding. ++ */ ++static struct usb_device_id imon_usb_id_table[] = { ++ /* ++ * Several devices with this same device ID, all use iMON_PAD.inf ++ * SoundGraph iMON PAD (IR & VFD) ++ * SoundGraph iMON PAD (IR & LCD) ++ * SoundGraph iMON Knob (IR only) ++ */ ++ { USB_DEVICE(0x15c2, 0xffdc) }, ++ ++ /* ++ * Newer devices, all driven by the latest iMON Windows driver, full ++ * list of device IDs extracted via 'strings Setup/data1.hdr |grep 15c2' ++ * Need user input to fill in details on unknown devices. 
++ */ ++ /* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */ ++ { USB_DEVICE(0x15c2, 0x0034) }, ++ /* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */ ++ { USB_DEVICE(0x15c2, 0x0035) }, ++ /* SoundGraph iMON OEM VFD (IR & VFD) */ ++ { USB_DEVICE(0x15c2, 0x0036) }, ++ /* device specifics unknown */ ++ { USB_DEVICE(0x15c2, 0x0037) }, ++ /* SoundGraph iMON OEM LCD (IR & LCD) */ ++ { USB_DEVICE(0x15c2, 0x0038) }, ++ /* SoundGraph iMON UltraBay (IR & LCD) */ ++ { USB_DEVICE(0x15c2, 0x0039) }, ++ /* device specifics unknown */ ++ { USB_DEVICE(0x15c2, 0x003a) }, ++ /* device specifics unknown */ ++ { USB_DEVICE(0x15c2, 0x003b) }, ++ /* SoundGraph iMON OEM Inside (IR only) */ ++ { USB_DEVICE(0x15c2, 0x003c) }, ++ /* device specifics unknown */ ++ { USB_DEVICE(0x15c2, 0x003d) }, ++ /* device specifics unknown */ ++ { USB_DEVICE(0x15c2, 0x003e) }, ++ /* device specifics unknown */ ++ { USB_DEVICE(0x15c2, 0x003f) }, ++ /* device specifics unknown */ ++ { USB_DEVICE(0x15c2, 0x0040) }, ++ /* SoundGraph iMON MINI (IR only) */ ++ { USB_DEVICE(0x15c2, 0x0041) }, ++ /* Antec Veris Multimedia Station EZ External (IR only) */ ++ { USB_DEVICE(0x15c2, 0x0042) }, ++ /* Antec Veris Multimedia Station Basic Internal (IR only) */ ++ { USB_DEVICE(0x15c2, 0x0043) }, ++ /* Antec Veris Multimedia Station Elite (IR & VFD) */ ++ { USB_DEVICE(0x15c2, 0x0044) }, ++ /* Antec Veris Multimedia Station Premiere (IR & LCD) */ ++ { USB_DEVICE(0x15c2, 0x0045) }, ++ /* device specifics unknown */ ++ { USB_DEVICE(0x15c2, 0x0046) }, ++ {} ++}; ++ ++/* iMON LCD models use a different write op */ ++static struct usb_device_id lcd_device_list[] = { ++ { USB_DEVICE(0x15c2, 0xffdc) }, ++ { USB_DEVICE(0x15c2, 0x0038) }, ++ { USB_DEVICE(0x15c2, 0x0039) }, ++ { USB_DEVICE(0x15c2, 0x0045) }, ++ {} ++}; ++ ++/* Some iMON devices have no lcd/vfd, don't set one up */ ++static struct usb_device_id ir_only_list[] = { ++ /* the first imon lcd and the knob share this device id. 
:\ */ ++ /*{ USB_DEVICE(0x15c2, 0xffdc) },*/ ++ { USB_DEVICE(0x15c2, 0x003c) }, ++ { USB_DEVICE(0x15c2, 0x0041) }, ++ { USB_DEVICE(0x15c2, 0x0042) }, ++ { USB_DEVICE(0x15c2, 0x0043) }, ++ {} ++}; ++ ++/* iMON devices with VGA touchscreens */ ++static struct usb_device_id imon_touchscreen_list[] = { ++ { USB_DEVICE(0x15c2, 0x0034) }, ++ { USB_DEVICE(0x15c2, 0x0035) }, ++ {} ++}; ++ ++/* USB Device data */ ++static struct usb_driver imon_driver = { ++ .name = MOD_NAME, ++ .probe = imon_probe, ++ .disconnect = imon_disconnect, ++ .suspend = imon_suspend, ++ .resume = imon_resume, ++ .id_table = imon_usb_id_table, ++}; ++ ++static struct usb_class_driver imon_vfd_class = { ++ .name = DEVICE_NAME, ++ .fops = &vfd_fops, ++ .minor_base = DISPLAY_MINOR_BASE, ++}; ++ ++static struct usb_class_driver imon_lcd_class = { ++ .name = DEVICE_NAME, ++ .fops = &lcd_fops, ++ .minor_base = DISPLAY_MINOR_BASE, ++}; ++ ++/* standard imon remote key table */ ++static const struct key_entry imon_remote_key_table[] = { ++ /* keys sorted mostly by frequency of use to optimize lookups */ ++ { KE_KEY, 0x2a8195b7, { KEY_REWIND } }, ++ { KE_KEY, 0x298315b7, { KEY_REWIND } }, ++ { KE_KEY, 0x2b8115b7, { KEY_FASTFORWARD } }, ++ { KE_KEY, 0x2b8315b7, { KEY_FASTFORWARD } }, ++ { KE_KEY, 0x2b9115b7, { KEY_PREVIOUS } }, ++ { KE_KEY, 0x298195b7, { KEY_NEXT } }, ++ ++ { KE_KEY, 0x2a8115b7, { KEY_PLAY } }, ++ { KE_KEY, 0x2a8315b7, { KEY_PLAY } }, ++ { KE_KEY, 0x2a9115b7, { KEY_PAUSE } }, ++ { KE_KEY, 0x2b9715b7, { KEY_STOP } }, ++ { KE_KEY, 0x298115b7, { KEY_RECORD } }, ++ ++ { KE_KEY, 0x01008000, { KEY_UP } }, ++ { KE_KEY, 0x01007f00, { KEY_DOWN } }, ++ { KE_KEY, 0x01000080, { KEY_LEFT } }, ++ { KE_KEY, 0x0100007f, { KEY_RIGHT } }, ++ ++ { KE_KEY, 0x2aa515b7, { KEY_UP } }, ++ { KE_KEY, 0x289515b7, { KEY_DOWN } }, ++ { KE_KEY, 0x29a515b7, { KEY_LEFT } }, ++ { KE_KEY, 0x2ba515b7, { KEY_RIGHT } }, ++ ++ { KE_KEY, 0x0200002c, { KEY_SPACE } }, /* Select/Space */ ++ { KE_KEY, 0x02000028, { KEY_ENTER } }, ++ 
{ KE_KEY, 0x288195b7, { KEY_EXIT } }, ++ { KE_KEY, 0x02000029, { KEY_ESC } }, ++ { KE_KEY, 0x0200002a, { KEY_BACKSPACE } }, ++ ++ { KE_KEY, 0x2b9595b7, { KEY_MUTE } }, ++ { KE_KEY, 0x28a395b7, { KEY_VOLUMEUP } }, ++ { KE_KEY, 0x28a595b7, { KEY_VOLUMEDOWN } }, ++ { KE_KEY, 0x289395b7, { KEY_CHANNELUP } }, ++ { KE_KEY, 0x288795b7, { KEY_CHANNELDOWN } }, ++ ++ { KE_KEY, 0x0200001e, { KEY_NUMERIC_1 } }, ++ { KE_KEY, 0x0200001f, { KEY_NUMERIC_2 } }, ++ { KE_KEY, 0x02000020, { KEY_NUMERIC_3 } }, ++ { KE_KEY, 0x02000021, { KEY_NUMERIC_4 } }, ++ { KE_KEY, 0x02000022, { KEY_NUMERIC_5 } }, ++ { KE_KEY, 0x02000023, { KEY_NUMERIC_6 } }, ++ { KE_KEY, 0x02000024, { KEY_NUMERIC_7 } }, ++ { KE_KEY, 0x02000025, { KEY_NUMERIC_8 } }, ++ { KE_KEY, 0x02000026, { KEY_NUMERIC_9 } }, ++ { KE_KEY, 0x02000027, { KEY_NUMERIC_0 } }, ++ ++ { KE_KEY, 0x02200025, { KEY_NUMERIC_STAR } }, ++ { KE_KEY, 0x02200020, { KEY_NUMERIC_POUND } }, ++ ++ { KE_KEY, 0x2b8515b7, { KEY_VIDEO } }, ++ { KE_KEY, 0x299195b7, { KEY_AUDIO } }, ++ { KE_KEY, 0x2ba115b7, { KEY_CAMERA } }, ++ { KE_KEY, 0x28a515b7, { KEY_TV } }, ++ { KE_KEY, 0x29a395b7, { KEY_DVD } }, ++ { KE_KEY, 0x29a295b7, { KEY_DVD } }, ++ ++ /* the Menu key between DVD and Subtitle on the RM-200... 
*/ ++ { KE_KEY, 0x2ba385b7, { KEY_MENU } }, ++ { KE_KEY, 0x2ba395b7, { KEY_MENU } }, ++ ++ { KE_KEY, 0x288515b7, { KEY_BOOKMARKS } }, ++ { KE_KEY, 0x2ab715b7, { KEY_MEDIA } }, /* Thumbnail */ ++ { KE_KEY, 0x298595b7, { KEY_SUBTITLE } }, ++ { KE_KEY, 0x2b8595b7, { KEY_LANGUAGE } }, ++ ++ { KE_KEY, 0x29a595b7, { KEY_ZOOM } }, ++ { KE_KEY, 0x2aa395b7, { KEY_SCREEN } }, /* FullScreen */ ++ ++ { KE_KEY, 0x299115b7, { KEY_KEYBOARD } }, ++ { KE_KEY, 0x299135b7, { KEY_KEYBOARD } }, ++ ++ { KE_KEY, 0x01010000, { BTN_LEFT } }, ++ { KE_KEY, 0x01020000, { BTN_RIGHT } }, ++ { KE_KEY, 0x01010080, { BTN_LEFT } }, ++ { KE_KEY, 0x01020080, { BTN_RIGHT } }, ++ ++ { KE_KEY, 0x2a9395b7, { KEY_CYCLEWINDOWS } }, /* TaskSwitcher */ ++ { KE_KEY, 0x2b8395b7, { KEY_TIME } }, /* Timer */ ++ ++ { KE_KEY, 0x289115b7, { KEY_POWER } }, ++ { KE_KEY, 0x29b195b7, { KEY_EJECTCD } }, /* the one next to play */ ++ { KE_KEY, 0x299395b7, { KEY_EJECTCLOSECD } }, /* eject (by TaskSw) */ ++ ++ { KE_KEY, 0x02800000, { KEY_MENU } }, /* Left Menu */ ++ { KE_KEY, 0x02000065, { KEY_COMPOSE } }, /* RightMenu */ ++ { KE_KEY, 0x2ab195b7, { KEY_PROG1 } }, /* Go */ ++ { KE_KEY, 0x29b715b7, { KEY_DASHBOARD } }, /* AppLauncher */ ++ { KE_END, 0 } ++}; ++ ++/* mce-mode imon mce remote key table */ ++static const struct key_entry imon_mce_key_table[] = { ++ /* keys sorted mostly by frequency of use to optimize lookups */ ++ { KE_KEY, 0x800f8415, { KEY_REWIND } }, ++ { KE_KEY, 0x800f8414, { KEY_FASTFORWARD } }, ++ { KE_KEY, 0x800f841b, { KEY_PREVIOUS } }, ++ { KE_KEY, 0x800f841a, { KEY_NEXT } }, ++ ++ { KE_KEY, 0x800f8416, { KEY_PLAY } }, ++ { KE_KEY, 0x800f8418, { KEY_PAUSE } }, ++ { KE_KEY, 0x800f8418, { KEY_PAUSE } }, ++ { KE_KEY, 0x800f8419, { KEY_STOP } }, ++ { KE_KEY, 0x800f8417, { KEY_RECORD } }, ++ ++ { KE_KEY, 0x02000052, { KEY_UP } }, ++ { KE_KEY, 0x02000051, { KEY_DOWN } }, ++ { KE_KEY, 0x02000050, { KEY_LEFT } }, ++ { KE_KEY, 0x0200004f, { KEY_RIGHT } }, ++ ++ { KE_KEY, 0x02000028, { KEY_ENTER } }, ++/* the 
OK and Enter buttons decode to the same value ++ { KE_KEY, 0x02000028, { KEY_OK } }, */ ++ { KE_KEY, 0x0200002a, { KEY_EXIT } }, ++ { KE_KEY, 0x02000029, { KEY_DELETE } }, ++ ++ { KE_KEY, 0x800f840e, { KEY_MUTE } }, ++ { KE_KEY, 0x800f8410, { KEY_VOLUMEUP } }, ++ { KE_KEY, 0x800f8411, { KEY_VOLUMEDOWN } }, ++ { KE_KEY, 0x800f8412, { KEY_CHANNELUP } }, ++ { KE_KEY, 0x800f8413, { KEY_CHANNELDOWN } }, ++ ++ { KE_KEY, 0x0200001e, { KEY_NUMERIC_1 } }, ++ { KE_KEY, 0x0200001f, { KEY_NUMERIC_2 } }, ++ { KE_KEY, 0x02000020, { KEY_NUMERIC_3 } }, ++ { KE_KEY, 0x02000021, { KEY_NUMERIC_4 } }, ++ { KE_KEY, 0x02000022, { KEY_NUMERIC_5 } }, ++ { KE_KEY, 0x02000023, { KEY_NUMERIC_6 } }, ++ { KE_KEY, 0x02000024, { KEY_NUMERIC_7 } }, ++ { KE_KEY, 0x02000025, { KEY_NUMERIC_8 } }, ++ { KE_KEY, 0x02000026, { KEY_NUMERIC_9 } }, ++ { KE_KEY, 0x02000027, { KEY_NUMERIC_0 } }, ++ ++ { KE_KEY, 0x02200025, { KEY_NUMERIC_STAR } }, ++ { KE_KEY, 0x02200020, { KEY_NUMERIC_POUND } }, ++ ++ { KE_KEY, 0x800f8446, { KEY_TV } }, ++ { KE_KEY, 0x800f8447, { KEY_AUDIO } }, ++ { KE_KEY, 0x800f8448, { KEY_PVR } }, /* RecordedTV */ ++ { KE_KEY, 0x800f8449, { KEY_CAMERA } }, ++ { KE_KEY, 0x800f844a, { KEY_VIDEO } }, ++ { KE_KEY, 0x800f8424, { KEY_DVD } }, ++ { KE_KEY, 0x800f8425, { KEY_TUNER } }, /* LiveTV */ ++ ++ { KE_KEY, 0x800f845b, { KEY_RED } }, ++ { KE_KEY, 0x800f845c, { KEY_GREEN } }, ++ { KE_KEY, 0x800f845d, { KEY_YELLOW } }, ++ { KE_KEY, 0x800f845e, { KEY_BLUE } }, ++ ++ { KE_KEY, 0x800f840f, { KEY_INFO } }, ++ { KE_KEY, 0x800f8426, { KEY_EPG } }, /* Guide */ ++ { KE_KEY, 0x800f845a, { KEY_SUBTITLE } }, /* Caption */ ++ ++ { KE_KEY, 0x800f840c, { KEY_POWER } }, ++ { KE_KEY, 0x800f840d, { KEY_PROG1 } }, /* Windows MCE button */ ++ { KE_END, 0 } ++ ++}; ++ ++/* imon receiver front panel/knob key table */ ++static const struct { ++ u64 hw_code; ++ u16 keycode; ++} imon_panel_key_table[] = { ++ { 0x000000000f000fee, KEY_PROG1 }, /* Go */ ++ { 0x000000001f000fee, KEY_AUDIO }, ++ { 0x0000000020000fee, 
KEY_VIDEO }, ++ { 0x0000000021000fee, KEY_CAMERA }, ++ { 0x0000000027000fee, KEY_DVD }, ++/* the TV key on my panel is broken, doesn't work under any OS ++ { 0x0000000000000fee, KEY_TV }, */ ++ { 0x0000000005000fee, KEY_PREVIOUS }, ++ { 0x0000000007000fee, KEY_REWIND }, ++ { 0x0000000004000fee, KEY_STOP }, ++ { 0x000000003c000fee, KEY_PLAYPAUSE }, ++ { 0x0000000008000fee, KEY_FASTFORWARD }, ++ { 0x0000000006000fee, KEY_NEXT }, ++ { 0x0000000100000fee, KEY_RIGHT }, ++ { 0x0000010000000fee, KEY_LEFT }, ++ { 0x000000003d000fee, KEY_SELECT }, ++ { 0x0001000000000fee, KEY_VOLUMEUP }, ++ { 0x0100000000000fee, KEY_VOLUMEDOWN }, ++ { 0x0000000001000fee, KEY_MUTE }, ++}; ++ ++/* to prevent races between open() and disconnect(), probing, etc */ ++static DEFINE_MUTEX(driver_lock); ++ ++/* Module bookkeeping bits */ ++MODULE_AUTHOR(MOD_AUTHOR); ++MODULE_DESCRIPTION(MOD_DESC); ++MODULE_VERSION(MOD_VERSION); ++MODULE_LICENSE("GPL"); ++MODULE_DEVICE_TABLE(usb, imon_usb_id_table); ++ ++static bool debug; ++module_param(debug, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)"); ++ ++/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */ ++static int display_type; ++module_param(display_type, int, S_IRUGO); ++MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, " ++ "1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)"); ++ ++/* IR protocol: native iMON, Windows MCE (RC-6), or iMON w/o PAD stabilize */ ++static int ir_protocol; ++module_param(ir_protocol, int, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(ir_protocol, "Which IR protocol to use. 0=native iMON, " ++ "1=Windows Media Center Ed. (RC-6), 2=iMON w/o PAD stabilize " ++ "(default: native iMON)"); ++ ++/* ++ * In certain use cases, mouse mode isn't really helpful, and could actually ++ * cause confusion, so allow disabling it when the IR device is open. 
++ */ ++static bool nomouse; ++module_param(nomouse, bool, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is " ++ "open. 0=don't disable, 1=disable. (default: don't disable)"); ++ ++/* threshold at which a pad push registers as an arrow key in kbd mode */ ++static int pad_thresh; ++module_param(pad_thresh, int, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an " ++ "arrow key in kbd mode (default: 28)"); ++ ++ ++static void free_imon_context(struct imon_context *ictx) ++{ ++ struct device *dev = ictx->dev; ++ ++ usb_free_urb(ictx->tx_urb); ++ usb_free_urb(ictx->rx_urb_intf0); ++ usb_free_urb(ictx->rx_urb_intf1); ++ kfree(ictx); ++ ++ dev_dbg(dev, "%s: iMON context freed\n", __func__); ++} ++ ++/** ++ * Called when the Display device (e.g. /dev/lcd0) ++ * is opened by the application. ++ */ ++static int display_open(struct inode *inode, struct file *file) ++{ ++ struct usb_interface *interface; ++ struct imon_context *ictx = NULL; ++ int subminor; ++ int retval = 0; ++ ++ /* prevent races with disconnect */ ++ mutex_lock(&driver_lock); ++ ++ subminor = iminor(inode); ++ interface = usb_find_interface(&imon_driver, subminor); ++ if (!interface) { ++ err("%s: could not find interface for minor %d", ++ __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ictx = usb_get_intfdata(interface); ++ ++ if (!ictx) { ++ err("%s: no context found for minor %d", __func__, subminor); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ mutex_lock(&ictx->lock); ++ ++ if (!ictx->display_supported) { ++ err("%s: display not supported by device", __func__); ++ retval = -ENODEV; ++ } else if (ictx->display_isopen) { ++ err("%s: display port is already open", __func__); ++ retval = -EBUSY; ++ } else { ++ ictx->display_isopen = 1; ++ file->private_data = ictx; ++ dev_dbg(ictx->dev, "display port opened\n"); ++ } ++ ++ mutex_unlock(&ictx->lock); ++ ++exit: ++ mutex_unlock(&driver_lock); ++ 
return retval; ++} ++ ++/** ++ * Called when the display device (e.g. /dev/lcd0) ++ * is closed by the application. ++ */ ++static int display_close(struct inode *inode, struct file *file) ++{ ++ struct imon_context *ictx = NULL; ++ int retval = 0; ++ ++ ictx = (struct imon_context *)file->private_data; ++ ++ if (!ictx) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&ictx->lock); ++ ++ if (!ictx->display_supported) { ++ err("%s: display not supported by device", __func__); ++ retval = -ENODEV; ++ } else if (!ictx->display_isopen) { ++ err("%s: display is not open", __func__); ++ retval = -EIO; ++ } else { ++ ictx->display_isopen = 0; ++ dev_dbg(ictx->dev, "display port closed\n"); ++ if (!ictx->dev_present_intf0) { ++ /* ++ * Device disconnected before close and IR port is not ++ * open. If IR port is open, context will be deleted by ++ * ir_close. ++ */ ++ mutex_unlock(&ictx->lock); ++ free_imon_context(ictx); ++ return retval; ++ } ++ } ++ ++ mutex_unlock(&ictx->lock); ++ return retval; ++} ++ ++/** ++ * Sends a packet to the device -- this function must be called ++ * with ictx->lock held. 
++ */ ++static int send_packet(struct imon_context *ictx) ++{ ++ unsigned int pipe; ++ int interval = 0; ++ int retval = 0; ++ struct usb_ctrlrequest *control_req = NULL; ++ ++ /* Check if we need to use control or interrupt urb */ ++ if (!ictx->tx_control) { ++ pipe = usb_sndintpipe(ictx->usbdev_intf0, ++ ictx->tx_endpoint->bEndpointAddress); ++ interval = ictx->tx_endpoint->bInterval; ++ ++ usb_fill_int_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe, ++ ictx->usb_tx_buf, ++ sizeof(ictx->usb_tx_buf), ++ usb_tx_callback, ictx, interval); ++ ++ ictx->tx_urb->actual_length = 0; ++ } else { ++ /* fill request into kmalloc'ed space: */ ++ control_req = kmalloc(sizeof(struct usb_ctrlrequest), ++ GFP_KERNEL); ++ if (control_req == NULL) ++ return -ENOMEM; ++ ++ /* setup packet is '21 09 0200 0001 0008' */ ++ control_req->bRequestType = 0x21; ++ control_req->bRequest = 0x09; ++ control_req->wValue = cpu_to_le16(0x0200); ++ control_req->wIndex = cpu_to_le16(0x0001); ++ control_req->wLength = cpu_to_le16(0x0008); ++ ++ /* control pipe is endpoint 0x00 */ ++ pipe = usb_sndctrlpipe(ictx->usbdev_intf0, 0); ++ ++ /* build the control urb */ ++ usb_fill_control_urb(ictx->tx_urb, ictx->usbdev_intf0, ++ pipe, (unsigned char *)control_req, ++ ictx->usb_tx_buf, ++ sizeof(ictx->usb_tx_buf), ++ usb_tx_callback, ictx); ++ ictx->tx_urb->actual_length = 0; ++ } ++ ++ init_completion(&ictx->tx.finished); ++ ictx->tx.busy = 1; ++ smp_rmb(); /* ensure later readers know we're busy */ ++ ++ retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL); ++ if (retval) { ++ ictx->tx.busy = 0; ++ smp_rmb(); /* ensure later readers know we're not busy */ ++ err("%s: error submitting urb(%d)", __func__, retval); ++ } else { ++ /* Wait for transmission to complete (or abort) */ ++ mutex_unlock(&ictx->lock); ++ retval = wait_for_completion_interruptible( ++ &ictx->tx.finished); ++ if (retval) ++ err("%s: task interrupted", __func__); ++ mutex_lock(&ictx->lock); ++ ++ retval = ictx->tx.status; ++ if (retval) ++ 
err("%s: packet tx failed (%d)", __func__, retval); ++ } ++ ++ kfree(control_req); ++ ++ return retval; ++} ++ ++/** ++ * Sends an associate packet to the iMON 2.4G. ++ * ++ * This might not be such a good idea, since it has an id collision with ++ * some versions of the "IR & VFD" combo. The only way to determine if it ++ * is an RF version is to look at the product description string. (Which ++ * we currently do not fetch). ++ */ ++static int send_associate_24g(struct imon_context *ictx) ++{ ++ int retval; ++ const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x20 }; ++ ++ if (!ictx) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ if (!ictx->dev_present_intf0) { ++ err("%s: no iMON device present", __func__); ++ return -ENODEV; ++ } ++ ++ memcpy(ictx->usb_tx_buf, packet, sizeof(packet)); ++ retval = send_packet(ictx); ++ ++ return retval; ++} ++ ++/** ++ * Sends packets to setup and show clock on iMON display ++ * ++ * Arguments: year - last 2 digits of year, month - 1..12, ++ * day - 1..31, dow - day of the week (0-Sun...6-Sat), ++ * hour - 0..23, minute - 0..59, second - 0..59 ++ */ ++static int send_set_imon_clock(struct imon_context *ictx, ++ unsigned int year, unsigned int month, ++ unsigned int day, unsigned int dow, ++ unsigned int hour, unsigned int minute, ++ unsigned int second) ++{ ++ unsigned char clock_enable_pkt[IMON_CLOCK_ENABLE_PACKETS][8]; ++ int retval = 0; ++ int i; ++ ++ if (!ictx) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ switch (ictx->display_type) { ++ case IMON_DISPLAY_TYPE_LCD: ++ clock_enable_pkt[0][0] = 0x80; ++ clock_enable_pkt[0][1] = year; ++ clock_enable_pkt[0][2] = month-1; ++ clock_enable_pkt[0][3] = day; ++ clock_enable_pkt[0][4] = hour; ++ clock_enable_pkt[0][5] = minute; ++ clock_enable_pkt[0][6] = second; ++ ++ clock_enable_pkt[1][0] = 0x80; ++ clock_enable_pkt[1][1] = 0; ++ clock_enable_pkt[1][2] = 0; ++ clock_enable_pkt[1][3] = 0; 
++ clock_enable_pkt[1][4] = 0; ++ clock_enable_pkt[1][5] = 0; ++ clock_enable_pkt[1][6] = 0; ++ ++ if (ictx->product == 0xffdc) { ++ clock_enable_pkt[0][7] = 0x50; ++ clock_enable_pkt[1][7] = 0x51; ++ } else { ++ clock_enable_pkt[0][7] = 0x88; ++ clock_enable_pkt[1][7] = 0x8a; ++ } ++ ++ break; ++ ++ case IMON_DISPLAY_TYPE_VFD: ++ clock_enable_pkt[0][0] = year; ++ clock_enable_pkt[0][1] = month-1; ++ clock_enable_pkt[0][2] = day; ++ clock_enable_pkt[0][3] = dow; ++ clock_enable_pkt[0][4] = hour; ++ clock_enable_pkt[0][5] = minute; ++ clock_enable_pkt[0][6] = second; ++ clock_enable_pkt[0][7] = 0x40; ++ ++ clock_enable_pkt[1][0] = 0; ++ clock_enable_pkt[1][1] = 0; ++ clock_enable_pkt[1][2] = 1; ++ clock_enable_pkt[1][3] = 0; ++ clock_enable_pkt[1][4] = 0; ++ clock_enable_pkt[1][5] = 0; ++ clock_enable_pkt[1][6] = 0; ++ clock_enable_pkt[1][7] = 0x42; ++ ++ break; ++ ++ default: ++ return -ENODEV; ++ } ++ ++ for (i = 0; i < IMON_CLOCK_ENABLE_PACKETS; i++) { ++ memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8); ++ retval = send_packet(ictx); ++ if (retval) { ++ err("%s: send_packet failed for packet %d", ++ __func__, i); ++ break; ++ } ++ } ++ ++ return retval; ++} ++ ++/** ++ * These are the sysfs functions to handle the association on the iMON 2.4G LT. 
++ */ ++static ssize_t show_associate_remote(struct device *d, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct imon_context *ictx = dev_get_drvdata(d); ++ ++ if (!ictx) ++ return -ENODEV; ++ ++ mutex_lock(&ictx->lock); ++ if (ictx->ir_isassociating) ++ strcpy(buf, "associating\n"); ++ else ++ strcpy(buf, "closed\n"); ++ ++ dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for " ++ "instructions on how to associate your iMON 2.4G DT/LT " ++ "remote\n"); ++ mutex_unlock(&ictx->lock); ++ return strlen(buf); ++} ++ ++static ssize_t store_associate_remote(struct device *d, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct imon_context *ictx; ++ ++ ictx = dev_get_drvdata(d); ++ ++ if (!ictx) ++ return -ENODEV; ++ ++ mutex_lock(&ictx->lock); ++ ictx->ir_isassociating = 1; ++ send_associate_24g(ictx); ++ mutex_unlock(&ictx->lock); ++ ++ return count; ++} ++ ++/** ++ * sysfs functions to control internal imon clock ++ */ ++static ssize_t show_imon_clock(struct device *d, ++ struct device_attribute *attr, char *buf) ++{ ++ struct imon_context *ictx = dev_get_drvdata(d); ++ size_t len; ++ ++ if (!ictx) ++ return -ENODEV; ++ ++ mutex_lock(&ictx->lock); ++ ++ if (!ictx->display_supported) { ++ len = snprintf(buf, PAGE_SIZE, "Not supported."); ++ } else { ++ len = snprintf(buf, PAGE_SIZE, ++ "To set the clock on your iMON display:\n" ++ "# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n" ++ "%s", ictx->display_isopen ? 
++ "\nNOTE: imon device must be closed\n" : ""); ++ } ++ ++ mutex_unlock(&ictx->lock); ++ ++ return len; ++} ++ ++static ssize_t store_imon_clock(struct device *d, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct imon_context *ictx = dev_get_drvdata(d); ++ ssize_t retval; ++ unsigned int year, month, day, dow, hour, minute, second; ++ ++ if (!ictx) ++ return -ENODEV; ++ ++ mutex_lock(&ictx->lock); ++ ++ if (!ictx->display_supported) { ++ retval = -ENODEV; ++ goto exit; ++ } else if (ictx->display_isopen) { ++ retval = -EBUSY; ++ goto exit; ++ } ++ ++ if (sscanf(buf, "%u %u %u %u %u %u %u", &year, &month, &day, &dow, ++ &hour, &minute, &second) != 7) { ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ if ((month < 1 || month > 12) || ++ (day < 1 || day > 31) || (dow > 6) || ++ (hour > 23) || (minute > 59) || (second > 59)) { ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ retval = send_set_imon_clock(ictx, year, month, day, dow, ++ hour, minute, second); ++ if (retval) ++ goto exit; ++ ++ retval = count; ++exit: ++ mutex_unlock(&ictx->lock); ++ ++ return retval; ++} ++ ++ ++static DEVICE_ATTR(imon_clock, S_IWUSR | S_IRUGO, show_imon_clock, ++ store_imon_clock); ++ ++static DEVICE_ATTR(associate_remote, S_IWUSR | S_IRUGO, show_associate_remote, ++ store_associate_remote); ++ ++static struct attribute *imon_display_sysfs_entries[] = { ++ &dev_attr_imon_clock.attr, ++ NULL ++}; ++ ++static struct attribute_group imon_display_attribute_group = { ++ .attrs = imon_display_sysfs_entries ++}; ++ ++static struct attribute *imon_rf_sysfs_entries[] = { ++ &dev_attr_associate_remote.attr, ++ NULL ++}; ++ ++static struct attribute_group imon_rf_attribute_group = { ++ .attrs = imon_rf_sysfs_entries ++}; ++ ++/** ++ * Writes data to the VFD. The iMON VFD is 2x16 characters ++ * and requires data in 5 consecutive USB interrupt packets, ++ * each packet but the last carrying 7 bytes. 
++ * ++ * I don't know if the VFD board supports features such as ++ * scrolling, clearing rows, blanking, etc. so at ++ * the caller must provide a full screen of data. If fewer ++ * than 32 bytes are provided spaces will be appended to ++ * generate a full screen. ++ */ ++static ssize_t vfd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos) ++{ ++ int i; ++ int offset; ++ int seq; ++ int retval = 0; ++ struct imon_context *ictx; ++ const unsigned char vfd_packet6[] = { ++ 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; ++ ++ ictx = (struct imon_context *)file->private_data; ++ if (!ictx) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&ictx->lock); ++ ++ if (!ictx->dev_present_intf0) { ++ err("%s: no iMON device present", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ if (n_bytes <= 0 || n_bytes > 32) { ++ err("%s: invalid payload size", __func__); ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ if (copy_from_user(ictx->tx.data_buf, buf, n_bytes)) { ++ retval = -EFAULT; ++ goto exit; ++ } ++ ++ /* Pad with spaces */ ++ for (i = n_bytes; i < 32; ++i) ++ ictx->tx.data_buf[i] = ' '; ++ ++ for (i = 32; i < 35; ++i) ++ ictx->tx.data_buf[i] = 0xFF; ++ ++ offset = 0; ++ seq = 0; ++ ++ do { ++ memcpy(ictx->usb_tx_buf, ictx->tx.data_buf + offset, 7); ++ ictx->usb_tx_buf[7] = (unsigned char) seq; ++ ++ retval = send_packet(ictx); ++ if (retval) { ++ err("%s: send packet failed for packet #%d", ++ __func__, seq/2); ++ goto exit; ++ } else { ++ seq += 2; ++ offset += 7; ++ } ++ ++ } while (offset < 35); ++ ++ /* Send packet #6 */ ++ memcpy(ictx->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); ++ ictx->usb_tx_buf[7] = (unsigned char) seq; ++ retval = send_packet(ictx); ++ if (retval) ++ err("%s: send packet failed for packet #%d", ++ __func__, seq / 2); ++ ++exit: ++ mutex_unlock(&ictx->lock); ++ ++ return (!retval) ? n_bytes : retval; ++} ++ ++/** ++ * Writes data to the LCD. 
The iMON OEM LCD screen expects 8-byte ++ * packets. We accept data as 16 hexadecimal digits, followed by a ++ * newline (to make it easy to drive the device from a command-line ++ * -- even though the actual binary data is a bit complicated). ++ * ++ * The device itself is not a "traditional" text-mode display. It's ++ * actually a 16x96 pixel bitmap display. That means if you want to ++ * display text, you've got to have your own "font" and translate the ++ * text into bitmaps for display. This is really flexible (you can ++ * display whatever diacritics you need, and so on), but it's also ++ * a lot more complicated than most LCDs... ++ */ ++static ssize_t lcd_write(struct file *file, const char *buf, ++ size_t n_bytes, loff_t *pos) ++{ ++ int retval = 0; ++ struct imon_context *ictx; ++ ++ ictx = (struct imon_context *)file->private_data; ++ if (!ictx) { ++ err("%s: no context for device", __func__); ++ return -ENODEV; ++ } ++ ++ mutex_lock(&ictx->lock); ++ ++ if (!ictx->display_supported) { ++ err("%s: no iMON display present", __func__); ++ retval = -ENODEV; ++ goto exit; ++ } ++ ++ if (n_bytes != 8) { ++ err("%s: invalid payload size: %d (expecting 8)", ++ __func__, (int) n_bytes); ++ retval = -EINVAL; ++ goto exit; ++ } ++ ++ if (copy_from_user(ictx->usb_tx_buf, buf, 8)) { ++ retval = -EFAULT; ++ goto exit; ++ } ++ ++ retval = send_packet(ictx); ++ if (retval) { ++ err("%s: send packet failed!", __func__); ++ goto exit; ++ } else { ++ dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n", ++ __func__, (int) n_bytes); ++ } ++exit: ++ mutex_unlock(&ictx->lock); ++ return (!retval) ? 
n_bytes : retval; ++} ++ ++/** ++ * Callback function for USB core API: transmit data ++ */ ++static void usb_tx_callback(struct urb *urb) ++{ ++ struct imon_context *ictx; ++ ++ if (!urb) ++ return; ++ ictx = (struct imon_context *)urb->context; ++ if (!ictx) ++ return; ++ ++ ictx->tx.status = urb->status; ++ ++ /* notify waiters that write has finished */ ++ ictx->tx.busy = 0; ++ smp_rmb(); /* ensure later readers know we're not busy */ ++ complete(&ictx->tx.finished); ++} ++ ++/** ++ * iMON IR receivers support two different signal sets -- those used by ++ * the iMON remotes, and those used by the Windows MCE remotes (which is ++ * really just RC-6), but only one or the other at a time, as the signals ++ * are decoded onboard the receiver. ++ */ ++static void imon_set_ir_protocol(struct imon_context *ictx) ++{ ++ int retval; ++ struct device *dev = ictx->dev; ++ unsigned char ir_proto_packet[] = { ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; ++ ++ switch (ir_protocol) { ++ case IMON_IR_PROTOCOL_MCE: ++ dev_dbg(dev, "Configuring IR receiver for MCE protocol\n"); ++ ir_proto_packet[0] = 0x01; ++ ictx->ir_protocol = IMON_IR_PROTOCOL_MCE; ++ ictx->pad_mouse = 0; ++ break; ++ case IMON_IR_PROTOCOL_IMON: ++ dev_dbg(dev, "Configuring IR receiver for iMON protocol\n"); ++ /* ir_proto_packet[0] = 0x00; // already the default */ ++ ictx->ir_protocol = IMON_IR_PROTOCOL_IMON; ++ ictx->pad_mouse = 1; ++ break; ++ case IMON_IR_PROTOCOL_IMON_NOPAD: ++ dev_dbg(dev, "Configuring IR receiver for iMON protocol " ++ "without PAD stabilize function enabled\n"); ++ /* ir_proto_packet[0] = 0x00; // already the default */ ++ ictx->ir_protocol = IMON_IR_PROTOCOL_IMON_NOPAD; ++ ictx->pad_mouse = 0; ++ break; ++ default: ++ dev_info(dev, "%s: unknown IR protocol specified, will " ++ "just default to iMON protocol\n", __func__); ++ ictx->ir_protocol = IMON_IR_PROTOCOL_IMON; ++ ictx->pad_mouse = 1; ++ break; ++ } ++ ++ memcpy(ictx->usb_tx_buf, &ir_proto_packet, 
sizeof(ir_proto_packet)); ++ ++ retval = send_packet(ictx); ++ if (retval) { ++ dev_info(dev, "%s: failed to set IR protocol, falling back " ++ "to standard iMON protocol mode\n", __func__); ++ ir_protocol = IMON_IR_PROTOCOL_IMON; ++ ictx->ir_protocol = IMON_IR_PROTOCOL_IMON; ++ } ++} ++ ++static inline int tv2int(const struct timeval *a, const struct timeval *b) ++{ ++ int usecs = 0; ++ int sec = 0; ++ ++ if (b->tv_usec > a->tv_usec) { ++ usecs = 1000000; ++ sec--; ++ } ++ ++ usecs += a->tv_usec - b->tv_usec; ++ ++ sec += a->tv_sec - b->tv_sec; ++ sec *= 1000; ++ usecs /= 1000; ++ sec += usecs; ++ ++ if (sec < 0) ++ sec = 1000; ++ ++ return sec; ++} ++ ++/** ++ * The directional pad behaves a bit differently, depending on whether this is ++ * one of the older ffdc devices or a newer device. Newer devices appear to ++ * have a higher resolution matrix for more precise mouse movement, but it ++ * makes things overly sensitive in keyboard mode, so we do some interesting ++ * contortions to make it less touchy. Older devices run through the same ++ * routine with shorter timeout and a smaller threshold. ++ */ ++static int stabilize(int a, int b, u16 timeout, u16 threshold) ++{ ++ struct timeval ct; ++ static struct timeval prev_time = {0, 0}; ++ static struct timeval hit_time = {0, 0}; ++ static int x, y, prev_result, hits; ++ int result = 0; ++ int msec, msec_hit; ++ ++ do_gettimeofday(&ct); ++ msec = tv2int(&ct, &prev_time); ++ msec_hit = tv2int(&ct, &hit_time); ++ ++ if (msec > 100) { ++ x = 0; ++ y = 0; ++ hits = 0; ++ } ++ ++ x += a; ++ y += b; ++ ++ prev_time = ct; ++ ++ if (abs(x) > threshold || abs(y) > threshold) { ++ if (abs(y) > abs(x)) ++ result = (y > 0) ? 0x7F : 0x80; ++ else ++ result = (x > 0) ? 
0x7F00 : 0x8000; ++ ++ x = 0; ++ y = 0; ++ ++ if (result == prev_result) { ++ hits++; ++ ++ if (hits > 3) { ++ switch (result) { ++ case 0x7F: ++ y = 17 * threshold / 30; ++ break; ++ case 0x80: ++ y -= 17 * threshold / 30; ++ break; ++ case 0x7F00: ++ x = 17 * threshold / 30; ++ break; ++ case 0x8000: ++ x -= 17 * threshold / 30; ++ break; ++ } ++ } ++ ++ if (hits == 2 && msec_hit < timeout) { ++ result = 0; ++ hits = 1; ++ } ++ } else { ++ prev_result = result; ++ hits = 1; ++ hit_time = ct; ++ } ++ } ++ ++ return result; ++} ++ ++static int imon_remote_key_lookup(u32 hw_code) ++{ ++ int i; ++ u32 code = be32_to_cpu(hw_code); ++ ++ /* Look for the initial press of a button */ ++ for (i = 0; i < ARRAY_SIZE(imon_remote_key_table); i++) ++ if (imon_remote_key_table[i].code == code) ++ return i; ++ ++ /* Look for the release of a button, return index + offset */ ++ for (i = 0; i < ARRAY_SIZE(imon_remote_key_table); i++) ++ if ((imon_remote_key_table[i].code | 0x4000) == code) ++ return i + IMON_KEY_RELEASE_OFFSET; ++ ++ return -1; ++} ++ ++static int imon_mce_key_lookup(u32 hw_code) ++{ ++ int i; ++ u32 code = be32_to_cpu(hw_code); ++ ++ for (i = 0; i < ARRAY_SIZE(imon_mce_key_table); i++) ++ if (imon_mce_key_table[i].code == code) ++ return i; ++ ++ for (i = 0; i < ARRAY_SIZE(imon_mce_key_table); i++) ++ if (imon_mce_key_table[i].code == (code | 0x8000)) ++ return i; ++ ++ return -1; ++} ++ ++static int imon_panel_key_lookup(u64 hw_code) ++{ ++ int i; ++ u64 code = be64_to_cpu(hw_code); ++ ++ for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) ++ if (imon_panel_key_table[i].hw_code == (code | 0xfee)) ++ return i; ++ ++ return -1; ++} ++ ++static bool imon_mouse_event(struct imon_context *ictx, ++ unsigned char *buf, int len) ++{ ++ char rel_x = 0x00, rel_y = 0x00; ++ u8 right_shift = 1; ++ bool mouse_input = 1; ++ int dir = 0; ++ ++ /* newer iMON device PAD or mouse button */ ++ if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) { ++ rel_x = buf[2]; ++ 
rel_y = buf[3]; ++ right_shift = 1; ++ /* 0xffdc iMON PAD or mouse button input */ ++ } else if (ictx->product == 0xffdc && (buf[0] & 0x40) && ++ !((buf[1] & 0x01) || ((buf[1] >> 2) & 0x01))) { ++ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 | ++ (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6; ++ if (buf[0] & 0x02) ++ rel_x |= ~0x0f; ++ rel_x = rel_x + rel_x / 2; ++ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 | ++ (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6; ++ if (buf[0] & 0x01) ++ rel_y |= ~0x0f; ++ rel_y = rel_y + rel_y / 2; ++ right_shift = 2; ++ /* some ffdc devices decode mouse buttons differently... */ ++ } else if (ictx->product == 0xffdc && (buf[0] == 0x68)) { ++ right_shift = 2; ++ /* ch+/- buttons, which we use for an emulated scroll wheel */ ++ } else if (ictx->kc == KEY_CHANNELUP && (buf[2] & 0x40) != 0x40) { ++ dir = 1; ++ } else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) { ++ dir = -1; ++ } else ++ mouse_input = 0; ++ ++ if (mouse_input) { ++ dev_dbg(ictx->dev, "sending mouse data via input subsystem\n"); ++ ++ if (dir) { ++ input_report_rel(ictx->idev, REL_WHEEL, dir); ++ } else if (rel_x || rel_y) { ++ input_report_rel(ictx->idev, REL_X, rel_x); ++ input_report_rel(ictx->idev, REL_Y, rel_y); ++ } else { ++ input_report_key(ictx->idev, BTN_LEFT, buf[1] & 0x1); ++ input_report_key(ictx->idev, BTN_RIGHT, ++ buf[1] >> right_shift & 0x1); ++ } ++ input_sync(ictx->idev); ++ ictx->last_keycode = ictx->kc; ++ } ++ ++ return mouse_input; ++} ++ ++static void imon_touch_event(struct imon_context *ictx, unsigned char *buf) ++{ ++ mod_timer(&ictx->ttimer, jiffies + TOUCH_TIMEOUT); ++ ictx->touch_x = (buf[0] << 4) | (buf[1] >> 4); ++ ictx->touch_y = 0xfff - ((buf[2] << 4) | (buf[1] & 0xf)); ++ input_report_abs(ictx->touch, ABS_X, ictx->touch_x); ++ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); ++ input_report_key(ictx->touch, BTN_TOUCH, 0x01); ++ input_sync(ictx->touch); ++} ++ ++static void imon_pad_to_keys(struct imon_context *ictx, 
unsigned char *buf) ++{ ++ int ki = 1; ++ int dir = 0; ++ int offset = IMON_KEY_RELEASE_OFFSET; ++ char rel_x = 0x00, rel_y = 0x00; ++ u16 timeout, threshold; ++ u64 temp_key; ++ u32 remote_key; ++ ++ /* ++ * The imon directional pad functions more like a touchpad. Bytes 3 & 4 ++ * contain a position coordinate (x,y), with each component ranging ++ * from -14 to 14. We want to down-sample this to only 4 discrete values ++ * for up/down/left/right arrow keys. Also, when you get too close to ++ * diagonals, it has a tendancy to jump back and forth, so lets try to ++ * ignore when they get too close. ++ */ ++ if (ictx->product != 0xffdc) { ++ /* first, pad to 8 bytes so it conforms with everything else */ ++ buf[5] = buf[6] = buf[7] = 0; ++ timeout = 500; /* in msecs */ ++ /* (2*threshold) x (2*threshold) square */ ++ threshold = pad_thresh ? pad_thresh : 28; ++ rel_x = buf[2]; ++ rel_y = buf[3]; ++ ++ if (ictx->ir_protocol == IMON_IR_PROTOCOL_IMON) { ++ if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) { ++ dir = stabilize((int)rel_x, (int)rel_y, ++ timeout, threshold); ++ if (!dir) { ++ ictx->kc = KEY_UNKNOWN; ++ return; ++ } ++ buf[2] = dir & 0xFF; ++ buf[3] = (dir >> 8) & 0xFF; ++ memcpy(&temp_key, buf, sizeof(temp_key)); ++ remote_key = (u32) (le64_to_cpu(temp_key) ++ & 0xffffffff); ++ ki = imon_remote_key_lookup(remote_key); ++ ictx->kc = ++ imon_remote_key_table[ki % offset].keycode; ++ } ++ } else { ++ if (abs(rel_y) > abs(rel_x)) { ++ buf[2] = (rel_y > 0) ? 0x7F : 0x80; ++ buf[3] = 0; ++ ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP; ++ } else { ++ buf[2] = 0; ++ buf[3] = (rel_x > 0) ? 0x7F : 0x80; ++ ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT; ++ } ++ } ++ ++ /* ++ * Handle on-board decoded pad events for e.g. older VFD/iMON-Pad ++ * device (15c2:ffdc). The remote generates various codes from ++ * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates ++ * 0x688301b7 and the right one 0x688481b7. All other keys generate ++ * 0x2nnnnnnn. 
Position coordinate is encoded in buf[1] and buf[2] with ++ * reversed endianess. Extract direction from buffer, rotate endianess, ++ * adjust sign and feed the values into stabilize(). The resulting codes ++ * will be 0x01008000, 0x01007F00, which match the newer devices. ++ */ ++ } else { ++ timeout = 10; /* in msecs */ ++ /* (2*threshold) x (2*threshold) square */ ++ threshold = pad_thresh ? pad_thresh : 15; ++ ++ /* buf[1] is x */ ++ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 | ++ (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6; ++ if (buf[0] & 0x02) ++ rel_x |= ~0x10+1; ++ /* buf[2] is y */ ++ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 | ++ (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6; ++ if (buf[0] & 0x01) ++ rel_y |= ~0x10+1; ++ ++ buf[0] = 0x01; ++ buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0; ++ ++ if (ictx->ir_protocol == IMON_IR_PROTOCOL_IMON) { ++ dir = stabilize((int)rel_x, (int)rel_y, ++ timeout, threshold); ++ if (!dir) { ++ ictx->kc = KEY_UNKNOWN; ++ return; ++ } ++ buf[2] = dir & 0xFF; ++ buf[3] = (dir >> 8) & 0xFF; ++ memcpy(&temp_key, buf, sizeof(temp_key)); ++ remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff); ++ ki = imon_remote_key_lookup(remote_key); ++ ictx->kc = imon_remote_key_table[ki % offset].keycode; ++ } else { ++ if (abs(rel_y) > abs(rel_x)) { ++ buf[2] = (rel_y > 0) ? 0x7F : 0x80; ++ buf[3] = 0; ++ ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP; ++ } else { ++ buf[2] = 0; ++ buf[3] = (rel_x > 0) ? 0x7F : 0x80; ++ ictx->kc = (rel_x > 0) ? 
KEY_RIGHT : KEY_LEFT; ++ } ++ } ++ } ++ ++ ictx->ki = ki; ++} ++ ++static int imon_parse_press_type(struct imon_context *ictx, ++ unsigned char *buf, u8 ksrc) ++{ ++ int press_type = 0; ++ ++ /* key release of 0x02XXXXXX key */ ++ if (ictx->ki == -1 && buf[0] == 0x02 && buf[3] == 0x00) ++ ictx->kc = ictx->last_keycode; ++ ++ /* mce-specific button handling */ ++ else if (ksrc == IMON_BUTTON_MCE) { ++ /* initial press */ ++ if (ictx->kc != ictx->last_keycode ++ || buf[2] != ictx->mce_toggle_bit) { ++ ictx->last_keycode = ictx->kc; ++ ictx->mce_toggle_bit = buf[2]; ++ press_type = 1; ++ mod_timer(&ictx->itimer, ++ jiffies + msecs_to_jiffies(MCE_TIMEOUT_MS)); ++ /* repeat */ ++ } else { ++ press_type = 2; ++ mod_timer(&ictx->itimer, ++ jiffies + msecs_to_jiffies(MCE_TIMEOUT_MS)); ++ } ++ ++ /* incoherent or irrelevant data */ ++ } else if (ictx->ki == -1) ++ press_type = -EINVAL; ++ ++ /* key release of 0xXXXXXXb7 key */ ++ else if (ictx->ki >= IMON_KEY_RELEASE_OFFSET) ++ press_type = 0; ++ ++ /* this is a button press */ ++ else ++ press_type = 1; ++ ++ return press_type; ++} ++ ++/** ++ * Process the incoming packet ++ */ ++static void imon_incoming_packet(struct imon_context *ictx, ++ struct urb *urb, int intf) ++{ ++ int len = urb->actual_length; ++ unsigned char *buf = urb->transfer_buffer; ++ struct device *dev = ictx->dev; ++ u16 kc; ++ bool norelease = 0; ++ int i, ki; ++ int offset = IMON_KEY_RELEASE_OFFSET; ++ u64 temp_key; ++ u64 panel_key = 0; ++ u32 remote_key; ++ struct input_dev *idev = NULL; ++ int press_type = 0; ++ int msec; ++ struct timeval t; ++ static struct timeval prev_time = { 0, 0 }; ++ u8 ksrc = IMON_BUTTON_IMON; ++ ++ idev = ictx->idev; ++ ++ /* Figure out what key was pressed */ ++ memcpy(&temp_key, buf, sizeof(temp_key)); ++ if (len == 8 && buf[7] == 0xee) { ++ ksrc = IMON_BUTTON_PANEL; ++ panel_key = le64_to_cpu(temp_key); ++ ki = imon_panel_key_lookup(panel_key); ++ kc = imon_panel_key_table[ki].keycode; ++ } else { ++ remote_key = 
(u32) (le64_to_cpu(temp_key) & 0xffffffff); ++ if (ictx->ir_protocol == IMON_IR_PROTOCOL_MCE) { ++ if (buf[0] == 0x80) ++ ksrc = IMON_BUTTON_MCE; ++ ki = imon_mce_key_lookup(remote_key); ++ kc = imon_mce_key_table[ki].keycode; ++ } else { ++ ki = imon_remote_key_lookup(remote_key); ++ kc = imon_remote_key_table[ki % offset].keycode; ++ } ++ } ++ ++ /* keyboard/mouse mode toggle button */ ++ if (kc == KEY_KEYBOARD && ki < offset) { ++ if (!nomouse) { ++ ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1; ++ dev_dbg(dev, "toggling to %s mode\n", ++ ictx->pad_mouse ? "mouse" : "keyboard"); ++ } else { ++ ictx->pad_mouse = 0; ++ dev_dbg(dev, "mouse mode was disabled by modparam\n"); ++ } ++ ictx->last_keycode = kc; ++ return; ++ } ++ ++ ictx->ki = ki; ++ ictx->kc = kc; ++ ++ /* send touchscreen events through input subsystem if touchpad data */ ++ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 && ++ buf[7] == 0x86) { ++ imon_touch_event(ictx, buf); ++ ++ /* look for mouse events with pad in mouse mode */ ++ } else if (ictx->pad_mouse) { ++ if (imon_mouse_event(ictx, buf, len)) ++ return; ++ } ++ ++ /* Now for some special handling to convert pad input to arrow keys */ ++ if (((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) || ++ ((len == 8) && (buf[0] & 0x40) && ++ !(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) { ++ len = 8; ++ imon_pad_to_keys(ictx, buf); ++ norelease = 1; ++ } ++ ++ if (debug) { ++ printk(KERN_INFO "intf%d decoded packet: ", intf); ++ for (i = 0; i < len; ++i) ++ printk("%02x ", buf[i]); ++ printk("\n"); ++ } ++ ++ press_type = imon_parse_press_type(ictx, buf, ksrc); ++ if (press_type < 0) ++ goto not_input_data; ++ ++ /* KEY_MUTE repeats from MCE and knob need to be suppressed */ ++ if ((ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) ++ && (buf[7] == 0xee || ksrc == IMON_BUTTON_MCE)) { ++ do_gettimeofday(&t); ++ msec = tv2int(&t, &prev_time); ++ prev_time = t; ++ if (msec < 200) ++ return; ++ } ++ ++ input_report_key(idev, ictx->kc, 
press_type); ++ input_sync(idev); ++ ++ /* panel keys and some remote keys don't generate a release */ ++ if (panel_key || norelease) { ++ input_report_key(idev, ictx->kc, 0); ++ input_sync(idev); ++ } ++ ++ ictx->last_keycode = ictx->kc; ++ ++ return; ++ ++not_input_data: ++ if (len != 8) { ++ dev_warn(dev, "imon %s: invalid incoming packet " ++ "size (len = %d, intf%d)\n", __func__, len, intf); ++ return; ++ } ++ ++ /* iMON 2.4G associate frame */ ++ if (buf[0] == 0x00 && ++ buf[2] == 0xFF && /* REFID */ ++ buf[3] == 0xFF && ++ buf[4] == 0xFF && ++ buf[5] == 0xFF && /* iMON 2.4G */ ++ ((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */ ++ (buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */ ++ dev_warn(dev, "%s: remote associated refid=%02X\n", ++ __func__, buf[1]); ++ ictx->ir_isassociating = 0; ++ } ++} ++ ++/** ++ * mce/rc6 keypresses have no distinct release code, use timer ++ */ ++static void imon_mce_timeout(unsigned long data) ++{ ++ struct imon_context *ictx = (struct imon_context *)data; ++ ++ input_report_key(ictx->idev, ictx->last_keycode, 0); ++ input_sync(ictx->idev); ++} ++ ++/** ++ * report touchscreen input ++ */ ++static void imon_touch_display_timeout(unsigned long data) ++{ ++ struct imon_context *ictx = (struct imon_context *)data; ++ ++ if (!ictx->display_type == IMON_DISPLAY_TYPE_VGA) ++ return; ++ ++ input_report_abs(ictx->touch, ABS_X, ictx->touch_x); ++ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); ++ input_report_key(ictx->touch, BTN_TOUCH, 0x00); ++ input_sync(ictx->touch); ++} ++ ++/** ++ * Callback function for USB core API: receive data ++ */ ++static void usb_rx_callback_intf0(struct urb *urb) ++{ ++ struct imon_context *ictx; ++ unsigned char *buf; ++ int len; ++ int intfnum = 0; ++ ++ if (!urb) ++ return; ++ ++ ictx = (struct imon_context *)urb->context; ++ if (!ictx) ++ return; ++ ++ buf = urb->transfer_buffer; ++ len = urb->actual_length; ++ ++ switch (urb->status) { ++ case -ENOENT: /* usbcore unlink successful! 
*/ ++ return; ++ ++ case -ESHUTDOWN: /* transport endpoint was shut down */ ++ break; ++ ++ case 0: ++ imon_incoming_packet(ictx, urb, intfnum); ++ break; ++ ++ default: ++ dev_warn(ictx->dev, "imon %s: status(%d): ignored\n", ++ __func__, urb->status); ++ break; ++ } ++ ++ usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC); ++} ++ ++static void usb_rx_callback_intf1(struct urb *urb) ++{ ++ struct imon_context *ictx; ++ unsigned char *buf; ++ int len; ++ int intfnum = 1; ++ ++ if (!urb) ++ return; ++ ++ ictx = (struct imon_context *)urb->context; ++ if (!ictx) ++ return; ++ ++ buf = urb->transfer_buffer; ++ len = urb->actual_length; ++ ++ switch (urb->status) { ++ case -ENOENT: /* usbcore unlink successful! */ ++ return; ++ ++ case -ESHUTDOWN: /* transport endpoint was shut down */ ++ break; ++ ++ case 0: ++ imon_incoming_packet(ictx, urb, intfnum); ++ break; ++ ++ default: ++ dev_warn(ictx->dev, "imon %s: status(%d): ignored\n", ++ __func__, urb->status); ++ break; ++ } ++ ++ usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC); ++} ++ ++static struct input_dev *imon_init_idev(struct imon_context *ictx) ++{ ++ struct input_dev *idev; ++ int ret, i; ++ ++ idev = input_allocate_device(); ++ if (!idev) { ++ dev_err(ictx->dev, "remote input dev allocation failed\n"); ++ goto idev_alloc_failed; ++ } ++ ++ snprintf(ictx->name_idev, sizeof(ictx->name_idev), ++ "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product); ++ idev->name = ictx->name_idev; ++ ++ usb_make_path(ictx->usbdev_intf0, ictx->phys_idev, ++ sizeof(ictx->phys_idev)); ++ strlcat(ictx->phys_idev, "/input0", sizeof(ictx->phys_idev)); ++ idev->phys = ictx->phys_idev; ++ ++ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); ++ ++ idev->keybit[BIT_WORD(BTN_MOUSE)] = ++ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); ++ idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) | ++ BIT_MASK(REL_WHEEL); ++ ++ input_set_drvdata(idev, ictx); ++ ++ if (ir_protocol == IMON_IR_PROTOCOL_MCE) ++ ret = sparse_keymap_setup(idev, 
imon_mce_key_table, NULL); ++ else ++ ret = sparse_keymap_setup(idev, imon_remote_key_table, NULL); ++ if (ret) ++ goto keymap_failed; ++ ++ /* can't use sparse keymap atm, 64-bit keycodes */ ++ for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) { ++ u16 kc = imon_panel_key_table[i].keycode; ++ __set_bit(kc, idev->keybit); ++ } ++ ++ usb_to_input_id(ictx->usbdev_intf0, &idev->id); ++ idev->dev.parent = ictx->dev; ++ ret = input_register_device(idev); ++ if (ret < 0) { ++ dev_err(ictx->dev, "remote input dev register failed\n"); ++ goto idev_register_failed; ++ } ++ ++ return idev; ++ ++idev_register_failed: ++ sparse_keymap_free(idev); ++keymap_failed: ++ input_free_device(idev); ++idev_alloc_failed: ++ ++ return NULL; ++} ++ ++static struct input_dev *imon_init_touch(struct imon_context *ictx) ++{ ++ struct input_dev *touch; ++ int ret; ++ ++ touch = input_allocate_device(); ++ if (!touch) { ++ dev_err(ictx->dev, "touchscreen input dev allocation failed\n"); ++ goto touch_alloc_failed; ++ } ++ ++ snprintf(ictx->name_touch, sizeof(ictx->name_touch), ++ "iMON USB Touchscreen (%04x:%04x)", ++ ictx->vendor, ictx->product); ++ touch->name = ictx->name_touch; ++ ++ usb_make_path(ictx->usbdev_intf1, ictx->phys_touch, ++ sizeof(ictx->phys_touch)); ++ strlcat(ictx->phys_touch, "/input1", sizeof(ictx->phys_touch)); ++ touch->phys = ictx->phys_touch; ++ ++ touch->evbit[0] = ++ BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); ++ touch->keybit[BIT_WORD(BTN_TOUCH)] = ++ BIT_MASK(BTN_TOUCH); ++ input_set_abs_params(touch, ABS_X, ++ 0x00, 0xfff, 0, 0); ++ input_set_abs_params(touch, ABS_Y, ++ 0x00, 0xfff, 0, 0); ++ ++ input_set_drvdata(touch, ictx); ++ ++ usb_to_input_id(ictx->usbdev_intf1, &touch->id); ++ touch->dev.parent = ictx->dev; ++ ret = input_register_device(touch); ++ if (ret < 0) { ++ dev_info(ictx->dev, "touchscreen input dev register failed\n"); ++ goto touch_register_failed; ++ } ++ ++ return touch; ++ ++touch_register_failed: ++ input_free_device(ictx->touch); ++ 
mutex_unlock(&ictx->lock); ++ ++touch_alloc_failed: ++ return NULL; ++} ++ ++static bool imon_find_endpoints(struct imon_context *ictx, ++ struct usb_host_interface *iface_desc) ++{ ++ struct usb_endpoint_descriptor *ep; ++ struct usb_endpoint_descriptor *rx_endpoint = NULL; ++ struct usb_endpoint_descriptor *tx_endpoint = NULL; ++ int ifnum = iface_desc->desc.bInterfaceNumber; ++ int num_endpts = iface_desc->desc.bNumEndpoints; ++ int i, ep_dir, ep_type; ++ bool ir_ep_found = 0; ++ bool display_ep_found = 0; ++ bool tx_control = 0; ++ ++ /* ++ * Scan the endpoint list and set: ++ * first input endpoint = IR endpoint ++ * first output endpoint = display endpoint ++ */ ++ for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) { ++ ep = &iface_desc->endpoint[i].desc; ++ ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; ++ ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ ++ if (!ir_ep_found && ep_dir == USB_DIR_IN && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ ++ rx_endpoint = ep; ++ ir_ep_found = 1; ++ dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__); ++ ++ } else if (!display_ep_found && ep_dir == USB_DIR_OUT && ++ ep_type == USB_ENDPOINT_XFER_INT) { ++ tx_endpoint = ep; ++ display_ep_found = 1; ++ dev_dbg(ictx->dev, "%s: found display endpoint\n", __func__); ++ } ++ } ++ ++ if (ifnum == 0) { ++ ictx->rx_endpoint_intf0 = rx_endpoint; ++ /* ++ * tx is used to send characters to lcd/vfd, associate RF ++ * remotes, set IR protocol, and maybe more... ++ */ ++ ictx->tx_endpoint = tx_endpoint; ++ } else { ++ ictx->rx_endpoint_intf1 = rx_endpoint; ++ } ++ ++ /* ++ * If we didn't find a display endpoint, this is probably one of the ++ * newer iMON devices that use control urb instead of interrupt ++ */ ++ if (!display_ep_found) { ++ tx_control = 1; ++ display_ep_found = 1; ++ dev_dbg(ictx->dev, "%s: device uses control endpoint, not " ++ "interface OUT endpoint\n", __func__); ++ } ++ ++ /* ++ * Some iMON receivers have no display. 
Unfortunately, it seems ++ * that SoundGraph recycles device IDs between devices both with ++ * and without... :\ ++ */ ++ if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) { ++ display_ep_found = 0; ++ dev_dbg(ictx->dev, "%s: device has no display\n", __func__); ++ } ++ ++ /* ++ * iMON Touch devices have a VGA touchscreen, but no "display", as ++ * that refers to e.g. /dev/lcd0 (a character device LCD or VFD). ++ */ ++ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { ++ display_ep_found = 0; ++ dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__); ++ } ++ ++ /* Input endpoint is mandatory */ ++ if (!ir_ep_found) ++ err("%s: no valid input (IR) endpoint found.", __func__); ++ ++ ictx->tx_control = tx_control; ++ ++ if (display_ep_found) ++ ictx->display_supported = 1; ++ ++ return ir_ep_found; ++ ++} ++ ++static struct imon_context *imon_init_intf0(struct usb_interface *intf) ++{ ++ struct imon_context *ictx; ++ struct urb *rx_urb; ++ struct urb *tx_urb; ++ struct device *dev = &intf->dev; ++ struct usb_host_interface *iface_desc; ++ int ret; ++ ++ ictx = kzalloc(sizeof(struct imon_context), GFP_KERNEL); ++ if (!ictx) { ++ dev_err(dev, "%s: kzalloc failed for context", __func__); ++ goto exit; ++ } ++ rx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!rx_urb) { ++ dev_err(dev, "%s: usb_alloc_urb failed for IR urb", __func__); ++ goto rx_urb_alloc_failed; ++ } ++ tx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!tx_urb) { ++ dev_err(dev, "%s: usb_alloc_urb failed for display urb", ++ __func__); ++ goto tx_urb_alloc_failed; ++ } ++ ++ mutex_init(&ictx->lock); ++ ++ mutex_lock(&ictx->lock); ++ ++ if (ir_protocol == IMON_IR_PROTOCOL_MCE) { ++ init_timer(&ictx->itimer); ++ ictx->itimer.data = (unsigned long)ictx; ++ ictx->itimer.function = imon_mce_timeout; ++ } ++ ++ ictx->dev = dev; ++ ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf)); ++ ictx->dev_present_intf0 = 1; ++ ictx->rx_urb_intf0 = rx_urb; ++ ictx->tx_urb = tx_urb; ++ ++ ictx->vendor = 
le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor); ++ ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct); ++ ++ iface_desc = intf->cur_altsetting; ++ if (!imon_find_endpoints(ictx, iface_desc)) ++ goto find_endpoint_failed; ++ ++ ictx->idev = imon_init_idev(ictx); ++ if (!ictx->idev) { ++ dev_err(dev, "%s: input device setup failed\n", __func__); ++ goto idev_setup_failed; ++ } ++ ++ usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0, ++ usb_rcvintpipe(ictx->usbdev_intf0, ++ ictx->rx_endpoint_intf0->bEndpointAddress), ++ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), ++ usb_rx_callback_intf0, ictx, ++ ictx->rx_endpoint_intf0->bInterval); ++ ++ ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL); ++ if (ret) { ++ err("%s: usb_submit_urb failed for intf0 (%d)", ++ __func__, ret); ++ goto urb_submit_failed; ++ } ++ ++ return ictx; ++ ++urb_submit_failed: ++ sparse_keymap_free(ictx->idev); ++ input_unregister_device(ictx->idev); ++ input_free_device(ictx->idev); ++idev_setup_failed: ++find_endpoint_failed: ++ mutex_unlock(&ictx->lock); ++ usb_free_urb(tx_urb); ++tx_urb_alloc_failed: ++ usb_free_urb(rx_urb); ++rx_urb_alloc_failed: ++ kfree(ictx); ++exit: ++ dev_err(dev, "unable to initialize intf0, err %d\n", ret); ++ ++ return NULL; ++} ++ ++static struct imon_context *imon_init_intf1(struct usb_interface *intf, ++ struct imon_context *ictx) ++{ ++ struct urb *rx_urb; ++ struct usb_host_interface *iface_desc; ++ int ret; ++ ++ rx_urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!rx_urb) { ++ err("%s: usb_alloc_urb failed for IR urb", __func__); ++ ret = -ENOMEM; ++ goto rx_urb_alloc_failed; ++ } ++ ++ mutex_lock(&ictx->lock); ++ ++ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { ++ init_timer(&ictx->ttimer); ++ ictx->ttimer.data = (unsigned long)ictx; ++ ictx->ttimer.function = imon_touch_display_timeout; ++ } ++ ++ ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf)); ++ ictx->dev_present_intf1 = 1; ++ ictx->rx_urb_intf1 = rx_urb; ++ ++ 
iface_desc = intf->cur_altsetting; ++ if (!imon_find_endpoints(ictx, iface_desc)) ++ goto find_endpoint_failed; ++ ++ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { ++ ictx->touch = imon_init_touch(ictx); ++ if (!ictx->touch) ++ goto touch_setup_failed; ++ } else ++ ictx->touch = NULL; ++ ++ usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1, ++ usb_rcvintpipe(ictx->usbdev_intf1, ++ ictx->rx_endpoint_intf1->bEndpointAddress), ++ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), ++ usb_rx_callback_intf1, ictx, ++ ictx->rx_endpoint_intf1->bInterval); ++ ++ ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL); ++ ++ if (ret) { ++ err("%s: usb_submit_urb failed for intf1 (%d)", ++ __func__, ret); ++ goto urb_submit_failed; ++ } ++ ++ return ictx; ++ ++urb_submit_failed: ++ if (ictx->touch) { ++ input_unregister_device(ictx->touch); ++ input_free_device(ictx->touch); ++ } ++touch_setup_failed: ++find_endpoint_failed: ++ mutex_unlock(&ictx->lock); ++ usb_free_urb(rx_urb); ++rx_urb_alloc_failed: ++ dev_err(ictx->dev, "unable to initialize intf0, err %d\n", ret); ++ ++ return NULL; ++} ++ ++static void imon_set_display_type(struct imon_context *ictx, ++ struct usb_interface *intf) ++{ ++ int configured_display_type = IMON_DISPLAY_TYPE_VFD; ++ ++ /* ++ * Try to auto-detect the type of display if the user hasn't set ++ * it by hand via the display_type modparam. Default is VFD. 
++ */ ++ if (display_type == IMON_DISPLAY_TYPE_AUTO) { ++ if (usb_match_id(intf, lcd_device_list)) ++ configured_display_type = IMON_DISPLAY_TYPE_LCD; ++ else if (usb_match_id(intf, imon_touchscreen_list)) ++ configured_display_type = IMON_DISPLAY_TYPE_VGA; ++ else if (usb_match_id(intf, ir_only_list)) ++ configured_display_type = IMON_DISPLAY_TYPE_NONE; ++ else ++ configured_display_type = IMON_DISPLAY_TYPE_VFD; ++ } else { ++ configured_display_type = display_type; ++ dev_dbg(ictx->dev, "%s: overriding display type to %d via " ++ "modparam\n", __func__, display_type); ++ } ++ ++ ictx->display_type = configured_display_type; ++} ++ ++static void imon_init_display(struct imon_context *ictx, ++ struct usb_interface *intf) ++{ ++ int ret; ++ const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x88 }; ++ ++ dev_dbg(ictx->dev, "Registering iMON display with sysfs\n"); ++ ++ /* set up sysfs entry for built-in clock */ ++ ret = sysfs_create_group(&intf->dev.kobj, ++ &imon_display_attribute_group); ++ if (ret) ++ dev_err(ictx->dev, "Could not create display sysfs " ++ "entries(%d)", ret); ++ ++ if (ictx->display_type == IMON_DISPLAY_TYPE_LCD) ++ ret = usb_register_dev(intf, &imon_lcd_class); ++ else ++ ret = usb_register_dev(intf, &imon_vfd_class); ++ if (ret) ++ /* Not a fatal error, so ignore */ ++ dev_info(ictx->dev, "could not get a minor number for " ++ "display\n"); ++ ++ /* Enable front-panel buttons and/or knobs */ ++ memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet)); ++ ret = send_packet(ictx); ++ /* Not fatal, but warn about it */ ++ if (ret) ++ dev_info(ictx->dev, "failed to enable front-panel " ++ "buttons and/or knobs\n"); ++} ++ ++/** ++ * Callback function for USB core API: Probe ++ */ ++static int __devinit imon_probe(struct usb_interface *interface, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *usbdev = NULL; ++ struct usb_host_interface *iface_desc = NULL; ++ struct usb_interface *first_if; ++ struct 
device *dev = &interface->dev; ++ int ifnum, code_length, sysfs_err; ++ int ret = 0; ++ struct imon_context *ictx = NULL; ++ struct imon_context *first_if_ctx = NULL; ++ u16 vendor, product; ++ ++ code_length = BUF_CHUNK_SIZE * 8; ++ ++ usbdev = usb_get_dev(interface_to_usbdev(interface)); ++ iface_desc = interface->cur_altsetting; ++ ifnum = iface_desc->desc.bInterfaceNumber; ++ vendor = le16_to_cpu(usbdev->descriptor.idVendor); ++ product = le16_to_cpu(usbdev->descriptor.idProduct); ++ ++ dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n", ++ __func__, vendor, product, ifnum); ++ ++ /* prevent races probing devices w/multiple interfaces */ ++ mutex_lock(&driver_lock); ++ ++ first_if = usb_ifnum_to_if(usbdev, 0); ++ first_if_ctx = (struct imon_context *)usb_get_intfdata(first_if); ++ ++ ++ if (ifnum == 0) { ++ ictx = imon_init_intf0(interface); ++ if (!ictx) { ++ err("%s: failed to initialize context!\n", __func__); ++ ret = -ENODEV; ++ goto fail; ++ } ++ ++ imon_set_display_type(ictx, interface); ++ ++ if (ictx->display_supported) ++ imon_init_display(ictx, interface); ++ ++ if (product == 0xffdc) { ++ /* RF products *also* use 0xffdc... sigh... 
*/ ++ sysfs_err = sysfs_create_group(&interface->dev.kobj, ++ &imon_rf_attribute_group); ++ if (sysfs_err) ++ err("%s: Could not create RF sysfs entries(%d)", ++ __func__, sysfs_err); ++ } ++ ++ } else { ++ /* this is the secondary interface on the device */ ++ ictx = imon_init_intf1(interface, first_if_ctx); ++ if (!ictx) { ++ err("%s: failed to attach to context!\n", __func__); ++ ret = -ENODEV; ++ goto fail; ++ } ++ ++ } ++ ++ usb_set_intfdata(interface, ictx); ++ ++ /* set IR protocol/remote type */ ++ imon_set_ir_protocol(ictx); ++ ++ dev_info(dev, "iMON device (%04x:%04x, intf%d) on " ++ "usb<%d:%d> initialized\n", vendor, product, ifnum, ++ usbdev->bus->busnum, usbdev->devnum); ++ ++ mutex_unlock(&ictx->lock); ++ mutex_unlock(&driver_lock); ++ ++ return 0; ++ ++fail: ++ mutex_unlock(&driver_lock); ++ dev_err(dev, "unable to register, err %d\n", ret); ++ ++ return ret; ++} ++ ++/** ++ * Callback function for USB core API: disconnect ++ */ ++static void __devexit imon_disconnect(struct usb_interface *interface) ++{ ++ struct imon_context *ictx; ++ struct device *dev; ++ int ifnum; ++ ++ /* prevent races with multi-interface device probing and display_open */ ++ mutex_lock(&driver_lock); ++ ++ ictx = usb_get_intfdata(interface); ++ dev = ictx->dev; ++ ifnum = interface->cur_altsetting->desc.bInterfaceNumber; ++ ++ mutex_lock(&ictx->lock); ++ ++ /* ++ * sysfs_remove_group is safe to call even if sysfs_create_group ++ * hasn't been called ++ */ ++ sysfs_remove_group(&interface->dev.kobj, ++ &imon_display_attribute_group); ++ sysfs_remove_group(&interface->dev.kobj, ++ &imon_rf_attribute_group); ++ ++ usb_set_intfdata(interface, NULL); ++ ++ /* Abort ongoing write */ ++ if (ictx->tx.busy) { ++ usb_kill_urb(ictx->tx_urb); ++ complete_all(&ictx->tx.finished); ++ } ++ ++ if (ifnum == 0) { ++ ictx->dev_present_intf0 = 0; ++ usb_kill_urb(ictx->rx_urb_intf0); ++ sparse_keymap_free(ictx->idev); ++ input_unregister_device(ictx->idev); ++ if (ictx->display_supported) { ++ 
if (ictx->display_type == IMON_DISPLAY_TYPE_LCD) ++ usb_deregister_dev(interface, &imon_lcd_class); ++ else ++ usb_deregister_dev(interface, &imon_vfd_class); ++ } ++ } else { ++ ictx->dev_present_intf1 = 0; ++ usb_kill_urb(ictx->rx_urb_intf1); ++ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) ++ input_unregister_device(ictx->touch); ++ } ++ ++ if (!ictx->dev_present_intf0 && !ictx->dev_present_intf1) { ++ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) ++ del_timer_sync(&ictx->ttimer); ++ mutex_unlock(&ictx->lock); ++ if (!ictx->display_isopen) ++ free_imon_context(ictx); ++ } else { ++ if (ictx->ir_protocol == IMON_IR_PROTOCOL_MCE) ++ del_timer_sync(&ictx->itimer); ++ mutex_unlock(&ictx->lock); ++ } ++ ++ mutex_unlock(&driver_lock); ++ ++ dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n", ++ __func__, ifnum); ++} ++ ++static int imon_suspend(struct usb_interface *intf, pm_message_t message) ++{ ++ struct imon_context *ictx = usb_get_intfdata(intf); ++ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; ++ ++ if (ifnum == 0) ++ usb_kill_urb(ictx->rx_urb_intf0); ++ else ++ usb_kill_urb(ictx->rx_urb_intf1); ++ ++ return 0; ++} ++ ++static int imon_resume(struct usb_interface *intf) ++{ ++ int rc = 0; ++ struct imon_context *ictx = usb_get_intfdata(intf); ++ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; ++ ++ if (ifnum == 0) { ++ usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0, ++ usb_rcvintpipe(ictx->usbdev_intf0, ++ ictx->rx_endpoint_intf0->bEndpointAddress), ++ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), ++ usb_rx_callback_intf0, ictx, ++ ictx->rx_endpoint_intf0->bInterval); ++ ++ rc = usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC); ++ ++ } else { ++ usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1, ++ usb_rcvintpipe(ictx->usbdev_intf1, ++ ictx->rx_endpoint_intf1->bEndpointAddress), ++ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), ++ usb_rx_callback_intf1, ictx, ++ ictx->rx_endpoint_intf1->bInterval); ++ ++ rc = 
usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC); ++ } ++ ++ return rc; ++} ++ ++static int __init imon_init(void) ++{ ++ int rc; ++ ++ rc = usb_register(&imon_driver); ++ if (rc) { ++ err("%s: usb register failed(%d)", __func__, rc); ++ rc = -ENODEV; ++ } ++ ++ return rc; ++} ++ ++static void __exit imon_exit(void) ++{ ++ usb_deregister(&imon_driver); ++} ++ ++module_init(imon_init); ++module_exit(imon_exit); diff --git a/mac80211-do-not-wipe-out-old-supported-rates.patch b/mac80211-do-not-wipe-out-old-supported-rates.patch new file mode 100644 index 000000000..e458a08ad --- /dev/null +++ b/mac80211-do-not-wipe-out-old-supported-rates.patch @@ -0,0 +1,71 @@ +From: Stanislaw Gruszka +To: kernel@lists.fedoraproject.org, "John W. Linville" +Subject: [PATCH 3/4 2.6.34.y] mac80211: do not wip out old supported rates +Date: Fri, 11 Jun 2010 17:05:13 +0200 + +commit f0b058b61711ebf5be94d6865ca7b2c259b71d37 upstream. + +Use old supported rates, if AP do not provide supported rates +information element in a new managment frame. + +Signed-off-by: Stanislaw Gruszka +Signed-off-by: John W. 
Linville +--- + net/mac80211/scan.c | 21 +++++++++++---------- + 1 files changed, 11 insertions(+), 10 deletions(-) + +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c +index 85507bd..151d933 100644 +--- a/net/mac80211/scan.c ++++ b/net/mac80211/scan.c +@@ -83,7 +83,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local, + { + struct cfg80211_bss *cbss; + struct ieee80211_bss *bss; +- int clen; ++ int clen, srlen; + s32 signal = 0; + + if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) +@@ -112,23 +112,24 @@ ieee80211_bss_info_update(struct ieee80211_local *local, + bss->dtim_period = tim_ie->dtim_period; + } + +- bss->supp_rates_len = 0; ++ /* replace old supported rates if we get new values */ ++ srlen = 0; + if (elems->supp_rates) { +- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; ++ clen = IEEE80211_MAX_SUPP_RATES; + if (clen > elems->supp_rates_len) + clen = elems->supp_rates_len; +- memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, +- clen); +- bss->supp_rates_len += clen; ++ memcpy(bss->supp_rates, elems->supp_rates, clen); ++ srlen += clen; + } + if (elems->ext_supp_rates) { +- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; ++ clen = IEEE80211_MAX_SUPP_RATES - srlen; + if (clen > elems->ext_supp_rates_len) + clen = elems->ext_supp_rates_len; +- memcpy(&bss->supp_rates[bss->supp_rates_len], +- elems->ext_supp_rates, clen); +- bss->supp_rates_len += clen; ++ memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen); ++ srlen += clen; + } ++ if (srlen) ++ bss->supp_rates_len = srlen; + + bss->wmm_used = elems->wmm_param || elems->wmm_info; + bss->uapsd_supported = is_uapsd_supported(elems); +-- +1.6.2.5 + +_______________________________________________ +kernel mailing list +kernel@lists.fedoraproject.org +https://admin.fedoraproject.org/mailman/listinfo/kernel + diff --git a/mac80211-explicitly-disable-enable-QoS.patch b/mac80211-explicitly-disable-enable-QoS.patch new file mode 100644 index 000000000..3d0005133 --- 
/dev/null +++ b/mac80211-explicitly-disable-enable-QoS.patch @@ -0,0 +1,103 @@ +From: Stanislaw Gruszka +To: kernel@lists.fedoraproject.org, "John W. Linville" +Subject: [PATCH 1/4 2.6.34.y] mac80211: explicitly disable/enable QoS +Date: Fri, 11 Jun 2010 17:05:11 +0200 + +commit e1b3ec1a2a336c328c336cfa5485a5f0484cc90d upstream. + +Add interface to disable/enable QoS (aka WMM or WME). Currently drivers +enable it explicitly when ->conf_tx method is called, and newer disable. +Disabling is needed for some APs, which do not support QoS, such +we should send QoS frames to them. + +Signed-off-by: Stanislaw Gruszka +Signed-off-by: John W. Linville +--- + include/net/mac80211.h | 5 +++++ + net/mac80211/mlme.c | 9 ++++++++- + net/mac80211/util.c | 5 +++++ + 3 files changed, 18 insertions(+), 1 deletions(-) + +diff --git a/include/net/mac80211.h b/include/net/mac80211.h +index 45d7d44..ea607d6 100644 +--- a/include/net/mac80211.h ++++ b/include/net/mac80211.h +@@ -580,11 +580,15 @@ struct ieee80211_rx_status { + * may turn the device off as much as possible. Typically, this flag will + * be set when an interface is set UP but not associated or scanning, but + * it can also be unset in that case when monitor interfaces are active. ++ * @IEEE80211_CONF_QOS: Enable 802.11e QoS also know as WMM (Wireless ++ * Multimedia). On some drivers (iwlwifi is one of know) we have ++ * to enable/disable QoS explicitly. 
+ */ + enum ieee80211_conf_flags { + IEEE80211_CONF_MONITOR = (1<<0), + IEEE80211_CONF_PS = (1<<1), + IEEE80211_CONF_IDLE = (1<<2), ++ IEEE80211_CONF_QOS = (1<<3), + }; + + +@@ -609,6 +613,7 @@ enum ieee80211_conf_changed { + IEEE80211_CONF_CHANGE_CHANNEL = BIT(6), + IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7), + IEEE80211_CONF_CHANGE_IDLE = BIT(8), ++ IEEE80211_CONF_CHANGE_QOS = BIT(9), + }; + + /** +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 875c8de..1b80e2b 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -592,6 +592,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, + int count; + u8 *pos, uapsd_queues = 0; + ++ if (!local->ops->conf_tx) ++ return; ++ + if (local->hw.queues < 4) + return; + +@@ -666,11 +669,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, + params.aifs, params.cw_min, params.cw_max, params.txop, + params.uapsd); + #endif +- if (drv_conf_tx(local, queue, ¶ms) && local->ops->conf_tx) ++ if (drv_conf_tx(local, queue, ¶ms)) + printk(KERN_DEBUG "%s: failed to set TX queue " + "parameters for queue %d\n", + wiphy_name(local->hw.wiphy), queue); + } ++ ++ /* enable WMM or activate new settings */ ++ local->hw.conf.flags |= IEEE80211_CONF_QOS; ++ drv_config(local, IEEE80211_CONF_CHANGE_QOS); + } + + static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index 53af570..582f43a 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -796,6 +796,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) + + drv_conf_tx(local, queue, &qparam); + } ++ ++ /* after reinitialize QoS TX queues setting to default, ++ * disable QoS at all */ ++ local->hw.conf.flags &= ~IEEE80211_CONF_QOS; ++ drv_config(local, IEEE80211_CONF_CHANGE_QOS); + } + + void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, +-- +1.6.2.5 + +_______________________________________________ 
+kernel mailing list +kernel@lists.fedoraproject.org +https://admin.fedoraproject.org/mailman/listinfo/kernel + diff --git a/mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch b/mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch new file mode 100644 index 000000000..a8eb7207e --- /dev/null +++ b/mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch @@ -0,0 +1,66 @@ +From: Stanislaw Gruszka +To: kernel@lists.fedoraproject.org, "John W. Linville" +Subject: [PATCH 4/4 2.6.34.y] mac80211: fix supported rates IE if AP doesn't + give us it's rates +Date: Fri, 11 Jun 2010 17:05:14 +0200 + +commit 76f273640134f3eb8257179cd5b3bc6ba5fe4a96 upstream. + +If AP do not provide us supported rates before assiociation, send +all rates we are supporting instead of empty information element. + +v1 -> v2: Add comment. + +Signed-off-by: Stanislaw Gruszka +Signed-off-by: John W. Linville +--- + net/mac80211/work.c | 28 +++++++++++++++++++--------- + 1 files changed, 19 insertions(+), 9 deletions(-) + +diff --git a/net/mac80211/work.c b/net/mac80211/work.c +index 15e1ba9..949c2d1 100644 +--- a/net/mac80211/work.c ++++ b/net/mac80211/work.c +@@ -213,15 +213,25 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, + + sband = local->hw.wiphy->bands[wk->chan->band]; + +- /* +- * Get all rates supported by the device and the AP as +- * some APs don't like getting a superset of their rates +- * in the association request (e.g. D-Link DAP 1353 in +- * b-only mode)... +- */ +- rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates, +- wk->assoc.supp_rates_len, +- sband, &rates); ++ if (wk->assoc.supp_rates_len) { ++ /* ++ * Get all rates supported by the device and the AP as ++ * some APs don't like getting a superset of their rates ++ * in the association request (e.g. D-Link DAP 1353 in ++ * b-only mode)... 
++ */ ++ rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates, ++ wk->assoc.supp_rates_len, ++ sband, &rates); ++ } else { ++ /* ++ * In case AP not provide any supported rates information ++ * before association, we send information element(s) with ++ * all rates that we support. ++ */ ++ rates = ~0; ++ rates_len = sband->n_bitrates; ++ } + + skb = alloc_skb(local->hw.extra_tx_headroom + + sizeof(*mgmt) + /* bit too much but doesn't matter */ +-- +1.6.2.5 + +_______________________________________________ +kernel mailing list +kernel@lists.fedoraproject.org +https://admin.fedoraproject.org/mailman/listinfo/kernel + diff --git a/merge.pl b/merge.pl new file mode 100755 index 000000000..8c318156a --- /dev/null +++ b/merge.pl @@ -0,0 +1,66 @@ +#! /usr/bin/perl + +my @args=@ARGV; +my %configvalues; +my @configoptions; +my $configcounter = 0; + +# optionally print out the architecture as the first line of our output +my $arch = $args[2]; +if (defined $arch) { + print "# $arch\n"; +} + +# first, read the override file + +open (FILE,"$args[0]") || die "Could not open $args[0]"; +while () { + my $str = $_; + my $configname; + + if (/\# ([\w]+) is not set/) { + $configname = $1; + } elsif (/([\w]+)=/) { + $configname = $1; + } + + if (defined($configname) && !exists($configvalues{$configname})) { + $configvalues{$configname} = $str; + $configoptions[$configcounter] = $configname; + $configcounter ++; + } +}; + +# now, read and output the entire configfile, except for the overridden +# parts... for those the new value is printed. 
+ +open (FILE2,"$args[1]") || die "Could not open $args[1]"; +while () { + my $configname; + + if (/\# ([\w]+) is not set/) { + $configname = $1; + } elsif (/([\w]+)=/) { + $configname = $1; + } + + if (defined($configname) && exists($configvalues{$configname})) { + print "$configvalues{$configname}"; + delete($configvalues{$configname}); + } else { + print "$_"; + } +} + +# now print the new values from the overridden configfile +my $counter = 0; + +while ($counter < $configcounter) { + my $configname = $configoptions[$counter]; + if (exists($configvalues{$configname})) { + print "$configvalues{$configname}"; + } + $counter++; +} + +1; diff --git a/neuter_intel_microcode_load.patch b/neuter_intel_microcode_load.patch new file mode 100644 index 000000000..2766e439d --- /dev/null +++ b/neuter_intel_microcode_load.patch @@ -0,0 +1,24 @@ +diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c +index ebd193e..c57024a 100644 +--- a/arch/x86/kernel/microcode_intel.c ++++ b/arch/x86/kernel/microcode_intel.c +@@ -453,9 +453,18 @@ static void microcode_fini_cpu(int cpu) + uci->mc = NULL; + } + ++/* we don't ship the broken out files... 
++ * instead, we'll just fail here, and load it with microcode_ctl ++ */ ++static enum ucode_state noop_request_microcode_fw(int cpu, ++ struct device *device) ++{ ++ return UCODE_NFOUND; ++} ++ + static struct microcode_ops microcode_intel_ops = { + .request_microcode_user = request_microcode_user, +- .request_microcode_fw = request_microcode_fw, ++ .request_microcode_fw = noop_request_microcode_fw, + .collect_cpu_info = collect_cpu_info, + .apply_microcode = apply_microcode, + .microcode_fini_cpu = microcode_fini_cpu, diff --git a/pci-acpi-disable-aspm-if-no-osc.patch b/pci-acpi-disable-aspm-if-no-osc.patch new file mode 100644 index 000000000..044f38964 --- /dev/null +++ b/pci-acpi-disable-aspm-if-no-osc.patch @@ -0,0 +1,53 @@ +From: Matthew Garrett +Subject: ACPI: Disable ASPM if the platform won't provide _OSC control for PCIe + +ACPI: Disable ASPM if the platform won't provide _OSC control for PCIe + +The PCI SIG documentation for the _OSC OS/firmware handshaking interface +states: + +"If the _OSC control method is absent from the scope of a host bridge +device, then the operating system must not enable or attempt to use any +features defined in this section for the hierarchy originated by the host +bridge." + +The obvious interpretation of this is that the OS should not attempt to use +PCIe hotplug, PME or AER - however, the specification also notes that an +_OSC method is *required* for PCIe hierarchies, and experimental validation +with An Alternative OS indicates that it doesn't use any PCIe functionality +if the _OSC method is missing. That arguably means we shouldn't be using +MSI or extended config space, but right now our problems seem to be limited +to vendors being surprised when ASPM gets enabled on machines when other +OSs refuse to do so. So, for now, let's just disable ASPM if the _OSC +method doesn't exist or refuses to hand over PCIe capability control. 
+ +Signed-off-by: Matthew Garrett +--- + +diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c +index 4eac593..1f67057 100644 +--- a/drivers/acpi/pci_root.c ++++ b/drivers/acpi/pci_root.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -543,6 +544,14 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) + if (flags != base_flags) + acpi_pci_osc_support(root, flags); + ++ status = acpi_pci_osc_control_set(root->device->handle, ++ 0); ++ ++ if (status == AE_NOT_EXIST) { ++ printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n"); ++ pcie_no_aspm(); ++ } ++ + pci_acpi_add_bus_pm_notifier(device, root->bus); + if (device->wakeup.flags.run_wake) + device_set_run_wake(root->bus->bridge, true); diff --git a/pci-aspm-dont-enable-too-early.patch b/pci-aspm-dont-enable-too-early.patch new file mode 100644 index 000000000..ea91a2554 --- /dev/null +++ b/pci-aspm-dont-enable-too-early.patch @@ -0,0 +1,50 @@ +From: Matthew Garrett +Date: Wed, 9 Jun 2010 20:05:07 +0000 (-0400) +Subject: PCI: Don't enable aspm before drivers have had a chance to veto it +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fjbarnes%2Fpci-2.6.git;a=commitdiff_plain;h=8f0b08c29f1df91315e48adce04462eb23671099 + +PCI: Don't enable aspm before drivers have had a chance to veto it + +The aspm code will currently set the configured aspm policy before drivers +have had an opportunity to indicate that their hardware doesn't support it. +Unfortunately, putting some hardware in L0 or L1 can result in the hardware +no longer responding to any requests, even after aspm is disabled. It makes +more sense to leave aspm policy at the BIOS defaults at initial setup time, +reconfiguring it after pci_enable_device() is called. This allows the +driver to blacklist individual devices beforehand. 
+ +Reviewed-by: Kenji Kaneshige +Signed-off-by: Matthew Garrett +Signed-off-by: Jesse Barnes +--- + +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index be53d98..7122281 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -588,11 +588,23 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) + * update through pcie_aspm_cap_init(). + */ + pcie_aspm_cap_init(link, blacklist); +- pcie_config_aspm_path(link); + + /* Setup initial Clock PM state */ + pcie_clkpm_cap_init(link, blacklist); +- pcie_set_clkpm(link, policy_to_clkpm_state(link)); ++ ++ /* ++ * At this stage drivers haven't had an opportunity to change the ++ * link policy setting. Enabling ASPM on broken hardware can cripple ++ * it even before the driver has had a chance to disable ASPM, so ++ * default to a safe level right now. If we're enabling ASPM beyond ++ * the BIOS's expectation, we'll do so once pci_enable_device() is ++ * called. ++ */ ++ if (aspm_policy != POLICY_POWERSAVE) { ++ pcie_config_aspm_path(link); ++ pcie_set_clkpm(link, policy_to_clkpm_state(link)); ++ } ++ + unlock: + mutex_unlock(&aspm_lock); + out: diff --git a/pci-change-error-messages-to-kern-info.patch b/pci-change-error-messages-to-kern-info.patch new file mode 100644 index 000000000..e0ce16cb1 --- /dev/null +++ b/pci-change-error-messages-to-kern-info.patch @@ -0,0 +1,43 @@ +From: Bjorn Helgaas +Date: Thu, 3 Jun 2010 19:47:18 +0000 (-0600) +Subject: PCI: change resource collision messages from KERN_ERR to KERN_INFO +X-Git-Tag: v2.6.35-rc3~4^2~3 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=f6d440daebd12be66ea1f834faf2966a49a07bd6 + +PCI: change resource collision messages from KERN_ERR to KERN_INFO + +We can often deal with PCI resource issues by moving devices around. In +that case, there's no point in alarming the user with messages like these. 
+There are many bug reports where the message itself is the only problem, +e.g., https://bugs.launchpad.net/ubuntu/+source/linux/+bug/413419 . + +Signed-off-by: Bjorn Helgaas +Signed-off-by: Jesse Barnes +--- + +diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c +index 17bed18..92379e2 100644 +--- a/drivers/pci/setup-res.c ++++ b/drivers/pci/setup-res.c +@@ -97,16 +97,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource) + + root = pci_find_parent_resource(dev, res); + if (!root) { +- dev_err(&dev->dev, "no compatible bridge window for %pR\n", +- res); ++ dev_info(&dev->dev, "no compatible bridge window for %pR\n", ++ res); + return -EINVAL; + } + + conflict = request_resource_conflict(root, res); + if (conflict) { +- dev_err(&dev->dev, +- "address space collision: %pR conflicts with %s %pR\n", +- res, conflict->name, conflict); ++ dev_info(&dev->dev, ++ "address space collision: %pR conflicts with %s %pR\n", ++ res, conflict->name, conflict); + return -EBUSY; + } + diff --git a/pci-fall-back-to-original-bios-bar-addresses.patch b/pci-fall-back-to-original-bios-bar-addresses.patch new file mode 100644 index 000000000..e65e0c13d --- /dev/null +++ b/pci-fall-back-to-original-bios-bar-addresses.patch @@ -0,0 +1,103 @@ +From: Bjorn Helgaas +Date: Thu, 15 Jul 2010 15:41:42 +0000 (-0600) +Subject: PCI: fall back to original BIOS BAR addresses +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=58c84eda07560a6b75b03e8d3b26d6eddfc14011 + +PCI: fall back to original BIOS BAR addresses + +If we fail to assign resources to a PCI BAR, this patch makes us try the +original address from BIOS rather than leaving it disabled. + +Linux tries to make sure all PCI device BARs are inside the upstream +PCI host bridge or P2P bridge apertures, reassigning BARs if necessary. +Windows does similar reassignment. 
+ +Before this patch, if we could not move a BAR into an aperture, we left +the resource unassigned, i.e., at address zero. Windows leaves such BARs +at the original BIOS addresses, and this patch makes Linux do the same. + +This is a bit ugly because we disable the resource long before we try to +reassign it, so we have to keep track of the BIOS BAR address somewhere. +For lack of a better place, I put it in the struct pci_dev. + +I think it would be cleaner to attempt the assignment immediately when the +claim fails, so we could easily remember the original address. But we +currently claim motherboard resources in the middle, after attempting to +claim PCI resources and before assigning new PCI resources, and changing +that is a fairly big job. + +Addresses https://bugzilla.kernel.org/show_bug.cgi?id=16263 + +Reported-by: Andrew +Tested-by: Andrew +Signed-off-by: Bjorn Helgaas +Signed-off-by: Jesse Barnes +--- + +diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c +index 6fdb3ec..5525309 100644 +--- a/arch/x86/pci/i386.c ++++ b/arch/x86/pci/i386.c +@@ -184,6 +184,7 @@ static void __init pcibios_allocate_resources(int pass) + idx, r, disabled, pass); + if (pci_claim_resource(dev, idx) < 0) { + /* We'll assign a new address later */ ++ dev->fw_addr[idx] = r->start; + r->end -= r->start; + r->start = 0; + } +diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c +index 92379e2..2aaa131 100644 +--- a/drivers/pci/setup-res.c ++++ b/drivers/pci/setup-res.c +@@ -156,6 +156,38 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, + pcibios_align_resource, dev); + } + ++ if (ret < 0 && dev->fw_addr[resno]) { ++ struct resource *root, *conflict; ++ resource_size_t start, end; ++ ++ /* ++ * If we failed to assign anything, let's try the address ++ * where firmware left it. That at least has a chance of ++ * working, which is better than just leaving it disabled. 
++ */ ++ ++ if (res->flags & IORESOURCE_IO) ++ root = &ioport_resource; ++ else ++ root = &iomem_resource; ++ ++ start = res->start; ++ end = res->end; ++ res->start = dev->fw_addr[resno]; ++ res->end = res->start + size - 1; ++ dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", ++ resno, res); ++ conflict = request_resource_conflict(root, res); ++ if (conflict) { ++ dev_info(&dev->dev, ++ "BAR %d: %pR conflicts with %s %pR\n", resno, ++ res, conflict->name, conflict); ++ res->start = start; ++ res->end = end; ++ } else ++ ret = 0; ++ } ++ + if (!ret) { + res->flags &= ~IORESOURCE_STARTALIGN; + dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 7cb0084..f26fda7 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -288,6 +288,7 @@ struct pci_dev { + */ + unsigned int irq; + struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ ++ resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */ + + /* These fields are used by common fixups */ + unsigned int transparent:1; /* Transparent PCI bridge */ diff --git a/pci-pm-do-not-use-native-pcie-pme-by-default.patch b/pci-pm-do-not-use-native-pcie-pme-by-default.patch new file mode 100644 index 000000000..3e6ef9517 --- /dev/null +++ b/pci-pm-do-not-use-native-pcie-pme-by-default.patch @@ -0,0 +1,87 @@ +From: Rafael J. 
Wysocki +Date: Fri, 18 Jun 2010 15:04:22 +0000 (+0200) +Subject: PCI/PM: Do not use native PCIe PME by default +X-Git-Tag: v2.6.35-rc4~60^2 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=b27759f880018b0cd43543dc94c921341b64b5ec + +PCI/PM: Do not use native PCIe PME by default + +Commit c7f486567c1d0acd2e4166c47069835b9f75e77b +(PCI PM: PCIe PME root port service driver) causes the native PCIe +PME signaling to be used by default, if the BIOS allows the kernel to +control the standard configuration registers of PCIe root ports. +However, the native PCIe PME is coupled to the native PCIe hotplug +and calling pcie_pme_acpi_setup() makes some BIOSes expect that +the native PCIe hotplug will be used as well. That, in turn, causes +problems to appear on systems where the PCIe hotplug driver is not +loaded. The usual symptom, as reported by Jaroslav Kameník and +others, is that the ACPI GPE associated with PCIe hotplug keeps +firing continuously causing kacpid to take substantial percentage +of CPU time. + +To work around this issue, change the default so that the native +PCIe PME signaling is only used if directly requested with the help +of the pcie_pme= command line switch. + +Fixes https://bugzilla.kernel.org/show_bug.cgi?id=15924 , which is +a listed regression from 2.6.33. + +Signed-off-by: Rafael J. Wysocki +Reported-by: Jaroslav Kameník +Tested-by: Antoni Grzymala +Signed-off-by: Jesse Barnes +--- + +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index 1808f11..82d6aeb 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -2048,7 +2048,9 @@ and is between 256 and 4096 characters. It is defined in the file + WARNING: Forcing ASPM on may cause system lockups. + + pcie_pme= [PCIE,PM] Native PCIe PME signaling options: +- off Do not use native PCIe PME signaling. 
++ Format: {auto|force}[,nomsi] ++ auto Use native PCIe PME signaling if the BIOS allows the ++ kernel to control PCIe config registers of root ports. + force Use native PCIe PME signaling even if the BIOS refuses + to allow the kernel to control the relevant PCIe config + registers. +diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme/pcie_pme.c +index aac285a..d672a0a 100644 +--- a/drivers/pci/pcie/pme/pcie_pme.c ++++ b/drivers/pci/pcie/pme/pcie_pme.c +@@ -34,7 +34,7 @@ + * being registered. Consequently, the interrupt-based PCIe PME signaling will + * not be used by any PCIe root ports in that case. + */ +-static bool pcie_pme_disabled; ++static bool pcie_pme_disabled = true; + + /* + * The PCI Express Base Specification 2.0, Section 6.1.8, states the following: +@@ -64,12 +64,19 @@ bool pcie_pme_msi_disabled; + + static int __init pcie_pme_setup(char *str) + { +- if (!strcmp(str, "off")) +- pcie_pme_disabled = true; +- else if (!strcmp(str, "force")) ++ if (!strncmp(str, "auto", 4)) ++ pcie_pme_disabled = false; ++ else if (!strncmp(str, "force", 5)) + pcie_pme_force_enable = true; +- else if (!strcmp(str, "nomsi")) +- pcie_pme_msi_disabled = true; ++ ++ str = strchr(str, ','); ++ if (str) { ++ str++; ++ str += strspn(str, " \t"); ++ if (*str && !strcmp(str, "nomsi")) ++ pcie_pme_msi_disabled = true; ++ } ++ + return 1; + } + __setup("pcie_pme=", pcie_pme_setup); diff --git a/perf b/perf new file mode 100644 index 000000000..ea8980694 --- /dev/null +++ b/perf @@ -0,0 +1,12 @@ +#!/bin/sh + +# In pathological situations, this will print some error about uname. +kverrel="`uname -r`" || exit + +exec "/usr/libexec/perf.$kverrel" ${1+"$@"} +rc=$? + +# We're still here, so the exec failed. +echo >&2 "Sorry, your kernel ($kverrel) doesn't support perf." 
+ +exit $rc diff --git a/prevent-runtime-conntrack-changes.patch b/prevent-runtime-conntrack-changes.patch new file mode 100644 index 000000000..59d62f3de --- /dev/null +++ b/prevent-runtime-conntrack-changes.patch @@ -0,0 +1,74 @@ +Jon Masters correctly points out that conntrack hash sizes +(nf_conntrack_htable_size) are global (not per-netns) and +modifiable at runtime via /sys/module/nf_conntrack/hashsize . + +Steps to reproduce: + clone(CLONE_NEWNET) + [grow /sys/module/nf_conntrack/hashsize] + exit() + +At netns exit we are going to scan random memory for conntracks to be killed. + +Apparently there is a code which deals with hashtable resize for +init_net (and it was there befode netns conntrack code), so prohibit +hashsize modification if there is more than one netns exists. + +To change hashtable sizes, you need to reload module. + +Expectation hashtable size was simply glued to a variable with no code +to rehash expectations, so it was a bug to allow writing to it. +Make "expect_hashsize" readonly. + +This is temporarily until we figure out what to do. 
+ +Signed-off-by: Alexey Dobriyan +Cc: stable@kernel.org +--- + + net/netfilter/nf_conntrack_core.c | 15 +++++++++++++++ + net/netfilter/nf_conntrack_expect.c | 2 +- + 2 files changed, 16 insertions(+), 1 deletion(-) + +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1198,6 +1199,20 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) + if (!nf_conntrack_htable_size) + return param_set_uint(val, kp); + ++ { ++ struct net *net; ++ unsigned int nr; ++ ++ nr = 0; ++ rtnl_lock(); ++ for_each_net(net) ++ nr++; ++ rtnl_unlock(); ++ /* init_net always exists */ ++ if (nr != 1) ++ return -EINVAL; ++ } ++ + hashsize = simple_strtoul(val, NULL, 0); + if (!hashsize) + return -EINVAL; +--- a/net/netfilter/nf_conntrack_expect.c ++++ b/net/netfilter/nf_conntrack_expect.c +@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net) + #endif /* CONFIG_PROC_FS */ + } + +-module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); ++module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); + + int nf_conntrack_expect_init(struct net *net) + { + diff --git a/quiet-prove_RCU-in-cgroups.patch b/quiet-prove_RCU-in-cgroups.patch new file mode 100644 index 000000000..f043ef51c --- /dev/null +++ b/quiet-prove_RCU-in-cgroups.patch @@ -0,0 +1,36 @@ +diff --git a/kernel/softlockup.c b/kernel/softlockup.c +index 4b493f6..ada1fcd 100644 +--- a/kernel/softlockup.c ++++ b/kernel/softlockup.c +@@ -187,7 +187,9 @@ static int watchdog(void *__bind_cpu) + { + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + ++ rcu_read_lock(); + sched_setscheduler(current, SCHED_FIFO, ¶m); ++ rcu_read_unlock(); + + /* initialize timestamp */ + __touch_softlockup_watchdog(); +diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c +index 5a5ea2c..47ecc56 100644 +--- a/kernel/sched_fair.c ++++ b/kernel/sched_fair.c +@@ 
-1272,6 +1272,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) + * effect of the currently running task from the load + * of the current CPU: + */ ++ ++ rcu_read_lock(); ++ + if (sync) { + tg = task_group(current); + weight = current->se.load.weight; +@@ -1298,6 +1301,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) + 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <= + imbalance*(load + effective_load(tg, prev_cpu, 0, weight)); + ++ rcu_read_unlock(); + /* + * If the currently running task will sleep within + * a reasonable amount of time then attract this newly diff --git a/revert-drm-kms-toggle-poll-around-switcheroo.patch b/revert-drm-kms-toggle-poll-around-switcheroo.patch new file mode 100644 index 000000000..f83fc2fdf --- /dev/null +++ b/revert-drm-kms-toggle-poll-around-switcheroo.patch @@ -0,0 +1,65 @@ +From 69b711c0c5e3d9cb3a5b9f741fb4cdc96b5739cb Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Subject: Revert "drm/kms: disable/enable poll around switcheroo on/off" + +This reverts commit fbf81762e385d3d45acad057b654d56972acf58c, mostly. 
+--- + drivers/gpu/drm/i915/i915_dma.c | 4 +--- + drivers/gpu/drm/nouveau/nouveau_state.c | 3 --- + drivers/gpu/drm/radeon/radeon_device.c | 2 -- + +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index 59a2bf8..2df3286 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -1320,14 +1320,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ + struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { +- printk(KERN_INFO "i915: switched on\n"); ++ printk(KERN_INFO "i915: switched off\n"); + /* i915 resume handler doesn't set to D0 */ + pci_set_power_state(dev->pdev, PCI_D0); + i915_resume(dev); +- drm_kms_helper_poll_enable(dev); + } else { + printk(KERN_ERR "i915: switched off\n"); +- drm_kms_helper_poll_disable(dev); + i915_suspend(dev, pmm); + } + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +index b02a231..0c28266 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_state.c ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -376,15 +376,12 @@ out_err: + static void nouveau_switcheroo_set_state(struct pci_dev *pdev, + enum vga_switcheroo_state state) + { +- struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { + printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); + nouveau_pci_resume(pdev); +- drm_kms_helper_poll_enable(dev); + } else { + printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); +- drm_kms_helper_poll_disable(dev); + nouveau_pci_suspend(pdev, pmm); + } + } +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c +index f10faed..225a9f2 100644 +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -546,10 +546,8 @@ static void radeon_switcheroo_set_state(struct 
pci_dev *pdev, enum vga_switchero + /* don't suspend or resume card normally */ + rdev->powered_down = false; + radeon_resume_kms(dev); +- drm_kms_helper_poll_enable(dev); + } else { + printk(KERN_INFO "radeon: switched off\n"); +- drm_kms_helper_poll_disable(dev); + radeon_suspend_kms(dev, pmm); + /* don't suspend or resume card normally */ + rdev->powered_down = true; diff --git a/sched-fix-over-scheduling-bug.patch b/sched-fix-over-scheduling-bug.patch new file mode 100644 index 000000000..b09c10196 --- /dev/null +++ b/sched-fix-over-scheduling-bug.patch @@ -0,0 +1,60 @@ +From: Alex,Shi +Date: Thu, 17 Jun 2010 06:08:13 +0000 (+0800) +Subject: sched: Fix over-scheduling bug +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3c93717cfa51316e4dbb471e7c0f9d243359d5f8 + +sched: Fix over-scheduling bug + +Commit e70971591 ("sched: Optimize unused cgroup configuration") introduced +an imbalanced scheduling bug. [[ in 2.6.32-rc1 ]] + +If we do not use CGROUP, function update_h_load won't update h_load. When the +system has a large number of tasks far more than logical CPU number, the +incorrect cfs_rq[cpu]->h_load value will cause load_balance() to pull too +many tasks to the local CPU from the busiest CPU. So the busiest CPU keeps +going in a round robin. That will hurt performance. + +The issue was found originally by a scientific calculation workload that +developed by Yanmin. With that commit, the workload performance drops +about 40%. 
+ + CPU before after + + 00 : 2 : 7 + 01 : 1 : 7 + 02 : 11 : 6 + 03 : 12 : 7 + 04 : 6 : 6 + 05 : 11 : 7 + 06 : 10 : 6 + 07 : 12 : 7 + 08 : 11 : 6 + 09 : 12 : 6 + 10 : 1 : 6 + 11 : 1 : 6 + 12 : 6 : 6 + 13 : 2 : 6 + 14 : 2 : 6 + 15 : 1 : 6 + +Reviewed-by: Yanmin zhang +Signed-off-by: Alex Shi +Signed-off-by: Peter Zijlstra +LKML-Reference: <1276754893.9452.5442.camel@debian> +Signed-off-by: Ingo Molnar +--- + +diff --git a/kernel/sched.c b/kernel/sched.c +index 2aaceeb..6c9e7c8 100644 +--- a/kernel/sched.c ++++ b/kernel/sched.c +@@ -1657,9 +1657,6 @@ static void update_shares(struct sched_domain *sd) + + static void update_h_load(long cpu) + { +- if (root_task_group_empty()) +- return; +- + walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); + } + diff --git a/sources b/sources index e69de29bb..15b118148 100644 --- a/sources +++ b/sources @@ -0,0 +1,2 @@ +10eebcb0178fb4540e2165bfd7efc7ad linux-2.6.34.tar.bz2 +6606bcddb89228bd4c7a5b82de384aa5 patch-2.6.34.1.bz2 diff --git a/ssb_check_for_sprom.patch b/ssb_check_for_sprom.patch new file mode 100644 index 000000000..9415e1337 --- /dev/null +++ b/ssb_check_for_sprom.patch @@ -0,0 +1,155 @@ +From 4d9d1ff88f920e9fcdde155c0a1366b7e0462d14 Mon Sep 17 00:00:00 2001 +From: John W. Linville +Date: Fri, 19 Mar 2010 14:58:01 -0400 +Subject: [PATCH v4] ssb: do not read SPROM if it does not exist + +Attempting to read registers that don't exist on the SSB bus can cause +hangs on some boxes. At least some b43 devices are 'in the wild' that +don't have SPROMs at all. When the SSB bus support loads, it attempts +to read these (non-existant) SPROMs and causes hard hangs on the box -- +no console output, etc. + +This patch adds some intelligence to determine whether or not the SPROM +is present before attempting to read it. This avoids those hard hangs +on those devices with no SPROM attached to their SSB bus. The +SSB-attached devices (e.g. b43, et al.) won't work, but at least the box +will survive to test further patches. 
:-) + +Signed-off-by: John W. Linville +Cc: Larry Finger +Cc: Michael Buesch +Cc: stable@kernel.org +--- + drivers/ssb/driver_chipcommon.c | 3 +++ + drivers/ssb/pci.c | 3 +++ + drivers/ssb/sprom.c | 26 ++++++++++++++++++++++++++ + include/linux/ssb/ssb.h | 3 +++ + include/linux/ssb/ssb_driver_chipcommon.h | 15 +++++++++++++++ + 5 files changed, 50 insertions(+), 0 deletions(-) + +diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c +index 9681536..6cf288d 100644 +--- a/drivers/ssb/driver_chipcommon.c ++++ b/drivers/ssb/driver_chipcommon.c +@@ -233,6 +233,9 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc) + { + if (!cc->dev) + return; /* We don't have a ChipCommon */ ++ if (cc->dev->id.revision >= 11) { ++ cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT); ++ } + ssb_pmu_init(cc); + chipco_powercontrol_init(cc); + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); +diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c +index a8dbb06..89d7ab1 100644 +--- a/drivers/ssb/pci.c ++++ b/drivers/ssb/pci.c +@@ -621,6 +621,9 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, + int err = -ENOMEM; + u16 *buf; + ++ if (!ssb_is_sprom_available(bus)) ++ return -ENODEV; ++ + buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); + if (!buf) + goto out; +diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c +index f2f920f..c690f58 100644 +--- a/drivers/ssb/sprom.c ++++ b/drivers/ssb/sprom.c +@@ -176,3 +176,29 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void) + { + return fallback_sprom; + } ++ ++bool ssb_is_sprom_available(struct ssb_bus *bus) ++{ ++ /* some older devices don't have chipcommon, but they have sprom */ ++ if (!bus->chipco.dev) ++ return true; ++ ++ /* status register only exists on chipcomon rev >= 11 */ ++ if (bus->chipco.dev->id.revision < 11) ++ return true; ++ ++ switch (bus->chip_id) { ++ case 0x4312: ++ return SSB_CHIPCO_CHST_4312_SPROM_PRESENT(bus->chipco.status); ++ case 0x4322: ++ return 
SSB_CHIPCO_CHST_4322_SPROM_PRESENT(bus->chipco.status); ++ case 0x4325: ++ return SSB_CHIPCO_CHST_4325_SPROM_PRESENT(bus->chipco.status); ++ default: ++ break; ++ } ++ if (bus->chipco.dev->id.revision >= 31) ++ return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM; ++ ++ return true; ++} +diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h +index 24f9885..3b4da23 100644 +--- a/include/linux/ssb/ssb.h ++++ b/include/linux/ssb/ssb.h +@@ -394,6 +394,9 @@ extern int ssb_bus_sdiobus_register(struct ssb_bus *bus, + + extern void ssb_bus_unregister(struct ssb_bus *bus); + ++/* Does the device have an SPROM? */ ++extern bool ssb_is_sprom_available(struct ssb_bus *bus); ++ + /* Set a fallback SPROM. + * See kdoc at the function definition for complete documentation. */ + extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom); +diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h +index 4e27acf..2cdf249 100644 +--- a/include/linux/ssb/ssb_driver_chipcommon.h ++++ b/include/linux/ssb/ssb_driver_chipcommon.h +@@ -53,6 +53,7 @@ + #define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */ + #define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ + #define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ ++#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */ + #define SSB_CHIPCO_CORECTL 0x0008 + #define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ + #define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ +@@ -385,6 +386,7 @@ + + + /** Chip specific Chip-Status register contents. */ ++#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */ + #define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003 + #define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. 
CIS, no SPROM */ + #define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ +@@ -398,6 +400,18 @@ + #define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4 + #define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */ + ++/** Macros to determine SPROM presence based on Chip-Status register. */ ++#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \ ++ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ ++ SSB_CHIPCO_CHST_4325_OTP_SEL) ++#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \ ++ (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS) ++#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \ ++ (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ ++ SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \ ++ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ ++ SSB_CHIPCO_CHST_4325_OTP_SEL)) ++ + + + /** Clockcontrol masks and values **/ +@@ -564,6 +578,7 @@ struct ssb_chipcommon_pmu { + struct ssb_chipcommon { + struct ssb_device *dev; + u32 capabilities; ++ u32 status; + /* Fast Powerup Delay constant */ + u16 fast_pwrup_delay; + struct ssb_chipcommon_pmu pmu; +-- +1.7.0.1 + diff --git a/thinkpad-acpi-add-x100e.patch b/thinkpad-acpi-add-x100e.patch new file mode 100644 index 000000000..216fb8961 --- /dev/null +++ b/thinkpad-acpi-add-x100e.patch @@ -0,0 +1,11 @@ +diff -up linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c.mjg linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c +--- linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c.mjg 2010-04-21 10:07:07.690036629 -0400 ++++ linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c 2010-04-21 10:07:24.227030266 -0400 +@@ -507,6 +507,7 @@ TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA. 
+ "\\_SB.PCI0.ISA.EC", /* A21e, A2xm/p, T20-22, X20-21 */ + "\\_SB.PCI0.AD4S.EC0", /* i1400, R30 */ + "\\_SB.PCI0.ICH3.EC0", /* R31 */ ++ "\\_SB.PCI0.LPC0.EC", /* X100e */ + "\\_SB.PCI0.LPC.EC", /* all others */ + ); + diff --git a/thinkpad-acpi-fix-backlight.patch b/thinkpad-acpi-fix-backlight.patch new file mode 100644 index 000000000..5ed2544fe --- /dev/null +++ b/thinkpad-acpi-fix-backlight.patch @@ -0,0 +1,56 @@ +diff -up linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c.orig linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c +--- linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c.orig 2010-05-17 16:28:13.254200070 -0400 ++++ linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c 2010-05-17 16:29:56.471200083 -0400 +@@ -3397,7 +3397,7 @@ static int __init hotkey_init(struct ibm + /* update bright_acpimode... */ + tpacpi_check_std_acpi_brightness_support(); + +- if (tp_features.bright_acpimode && acpi_video_backlight_support()) { ++ if (acpi_video_backlight_support()) { + printk(TPACPI_INFO + "This ThinkPad has standard ACPI backlight " + "brightness control, supported by the ACPI " +@@ -6189,26 +6189,24 @@ static int __init brightness_init(struct + * going to publish a backlight interface + */ + b = tpacpi_check_std_acpi_brightness_support(); +- if (b > 0) { + +- if (acpi_video_backlight_support()) { +- if (brightness_enable > 1) { +- printk(TPACPI_NOTICE +- "Standard ACPI backlight interface " +- "available, not loading native one.\n"); +- return 1; +- } else if (brightness_enable == 1) { +- printk(TPACPI_NOTICE +- "Backlight control force enabled, even if standard " +- "ACPI backlight interface is available\n"); +- } +- } else { +- if (brightness_enable > 1) { +- printk(TPACPI_NOTICE +- "Standard ACPI backlight interface not " +- "available, thinkpad_acpi native " +- "brightness control enabled\n"); +- } ++ if (acpi_video_backlight_support()) { ++ if (brightness_enable > 1) { ++ printk(TPACPI_NOTICE ++ "Standard ACPI backlight interface 
" ++ "available, not loading native one.\n"); ++ return 1; ++ } else if (brightness_enable == 1) { ++ printk(TPACPI_NOTICE ++ "Backlight control force enabled, even if standard " ++ "ACPI backlight interface is available\n"); ++ } ++ } else { ++ if (brightness_enable > 1) { ++ printk(TPACPI_NOTICE ++ "Standard ACPI backlight interface not " ++ "available, thinkpad_acpi native " ++ "brightness control enabled\n"); + } + } + diff --git a/usb-obey-the-sysfs-power-wakeup-setting.patch b/usb-obey-the-sysfs-power-wakeup-setting.patch new file mode 100644 index 000000000..ce2bf2fce --- /dev/null +++ b/usb-obey-the-sysfs-power-wakeup-setting.patch @@ -0,0 +1,65 @@ +From: Alan Stern +Date: Tue, 22 Jun 2010 20:14:48 +0000 (-0400) +Subject: USB: obey the sysfs power/wakeup setting +X-Git-Tag: v2.6.35-rc4~18^2~6 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=48826626263d4a61d06fd8c5805da31f925aefa0 + +USB: obey the sysfs power/wakeup setting + +This patch (as1403) is a partial reversion of an earlier change +(commit 5f677f1d45b2bf08085bbba7394392dfa586fa8e "USB: fix remote +wakeup settings during system sleep"). After hearing from a user, I +realized that remote wakeup should be enabled during system sleep +whenever userspace allows it, and not only if a driver requests it +too. + +Indeed, there could be a device with no driver, that does nothing but +generate a wakeup request when the user presses a button. Such a +device should be allowed to do its job. + +The problem fixed by the earlier patch -- device generating a wakeup +request for no reason, causing system suspend to abort -- was also +addressed by a later patch ("USB: don't enable remote wakeup by +default", accepted but not yet merged into mainline). The device +won't be able to generate the bogus wakeup requests because it will be +disabled for remote wakeup by default. Hence this reversion will not +re-introduce any old problems. 
+ +Signed-off-by: Alan Stern +Cc: stable [.34] +Signed-off-by: Greg Kroah-Hartman +--- + +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c +index de98a94..a6bd53a 100644 +--- a/drivers/usb/core/driver.c ++++ b/drivers/usb/core/driver.c +@@ -1272,8 +1272,7 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) + + static void choose_wakeup(struct usb_device *udev, pm_message_t msg) + { +- int w, i; +- struct usb_interface *intf; ++ int w; + + /* Remote wakeup is needed only when we actually go to sleep. + * For things like FREEZE and QUIESCE, if the device is already +@@ -1285,16 +1284,10 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg) + return; + } + +- /* If remote wakeup is permitted, see whether any interface drivers ++ /* Enable remote wakeup if it is allowed, even if no interface drivers + * actually want it. + */ +- w = 0; +- if (device_may_wakeup(&udev->dev) && udev->actconfig) { +- for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { +- intf = udev->actconfig->interface[i]; +- w |= intf->needs_remote_wakeup; +- } +- } ++ w = device_may_wakeup(&udev->dev); + + /* If the device is autosuspended with the wrong wakeup setting, + * autoresume now so the setting can be changed. diff --git a/virt_console-rollup.patch b/virt_console-rollup.patch new file mode 100644 index 000000000..57fd9b293 --- /dev/null +++ b/virt_console-rollup.patch @@ -0,0 +1,1031 @@ +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 48ce834..8c99bf1 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -33,35 +33,6 @@ + #include + #include "hvc_console.h" + +-/* Moved here from .h file in order to disable MULTIPORT. */ +-#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ +- +-struct virtio_console_multiport_conf { +- struct virtio_console_config config; +- /* max. 
number of ports this device can hold */ +- __u32 max_nr_ports; +- /* number of ports added so far */ +- __u32 nr_ports; +-} __attribute__((packed)); +- +-/* +- * A message that's passed between the Host and the Guest for a +- * particular port. +- */ +-struct virtio_console_control { +- __u32 id; /* Port number */ +- __u16 event; /* The kind of control event (see below) */ +- __u16 value; /* Extra information for the key */ +-}; +- +-/* Some events for control messages */ +-#define VIRTIO_CONSOLE_PORT_READY 0 +-#define VIRTIO_CONSOLE_CONSOLE_PORT 1 +-#define VIRTIO_CONSOLE_RESIZE 2 +-#define VIRTIO_CONSOLE_PORT_OPEN 3 +-#define VIRTIO_CONSOLE_PORT_NAME 4 +-#define VIRTIO_CONSOLE_PORT_REMOVE 5 +- + /* + * This is a global struct for storing common data for all the devices + * this driver handles. +@@ -107,6 +78,9 @@ struct console { + /* The hvc device associated with this console port */ + struct hvc_struct *hvc; + ++ /* The size of the console */ ++ struct winsize ws; ++ + /* + * This number identifies the number that we used to register + * with hvc in hvc_instantiate() and hvc_alloc(); this is the +@@ -139,7 +113,6 @@ struct ports_device { + * notification + */ + struct work_struct control_work; +- struct work_struct config_work; + + struct list_head ports; + +@@ -150,7 +123,7 @@ struct ports_device { + spinlock_t cvq_lock; + + /* The current config space is stored here */ +- struct virtio_console_multiport_conf config; ++ struct virtio_console_config config; + + /* The virtio device we're associated with */ + struct virtio_device *vdev; +@@ -189,6 +162,9 @@ struct port { + */ + spinlock_t inbuf_lock; + ++ /* Protect the operations on the out_vq. 
*/ ++ spinlock_t outvq_lock; ++ + /* The IO vqs for this port */ + struct virtqueue *in_vq, *out_vq; + +@@ -214,6 +190,8 @@ struct port { + /* The 'id' to identify the port with the Host */ + u32 id; + ++ bool outvq_full; ++ + /* Is the host device open */ + bool host_connected; + +@@ -403,22 +381,22 @@ out: + return ret; + } + +-static ssize_t send_control_msg(struct port *port, unsigned int event, +- unsigned int value) ++static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, ++ unsigned int event, unsigned int value) + { + struct scatterlist sg[1]; + struct virtio_console_control cpkt; + struct virtqueue *vq; + unsigned int len; + +- if (!use_multiport(port->portdev)) ++ if (!use_multiport(portdev)) + return 0; + +- cpkt.id = port->id; ++ cpkt.id = port_id; + cpkt.event = event; + cpkt.value = value; + +- vq = port->portdev->c_ovq; ++ vq = portdev->c_ovq; + + sg_init_one(sg, &cpkt, sizeof(cpkt)); + if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) { +@@ -429,15 +407,39 @@ static ssize_t send_control_msg(struct port *port, unsigned int event, + return 0; + } + +-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) ++static ssize_t send_control_msg(struct port *port, unsigned int event, ++ unsigned int value) ++{ ++ return __send_control_msg(port->portdev, port->id, event, value); ++} ++ ++/* Callers must take the port->outvq_lock */ ++static void reclaim_consumed_buffers(struct port *port) ++{ ++ void *buf; ++ unsigned int len; ++ ++ while ((buf = virtqueue_get_buf(port->out_vq, &len))) { ++ kfree(buf); ++ port->outvq_full = false; ++ } ++} ++ ++static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, ++ bool nonblock) + { + struct scatterlist sg[1]; + struct virtqueue *out_vq; + ssize_t ret; ++ unsigned long flags; + unsigned int len; + + out_vq = port->out_vq; + ++ spin_lock_irqsave(&port->outvq_lock, flags); ++ ++ reclaim_consumed_buffers(port); ++ + sg_init_one(sg, in_buf, in_count); + ret = 
virtqueue_add_buf(out_vq, sg, 1, 0, in_buf); + +@@ -446,14 +448,29 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) + + if (ret < 0) { + in_count = 0; +- goto fail; ++ goto done; + } + +- /* Wait till the host acknowledges it pushed out the data we sent. */ ++ if (ret == 0) ++ port->outvq_full = true; ++ ++ if (nonblock) ++ goto done; ++ ++ /* ++ * Wait till the host acknowledges it pushed out the data we ++ * sent. This is done for ports in blocking mode or for data ++ * from the hvc_console; the tty operations are performed with ++ * spinlocks held so we can't sleep here. ++ */ + while (!virtqueue_get_buf(out_vq, &len)) + cpu_relax(); +-fail: +- /* We're expected to return the amount of data we wrote */ ++done: ++ spin_unlock_irqrestore(&port->outvq_lock, flags); ++ /* ++ * We're expected to return the amount of data we wrote -- all ++ * of it ++ */ + return in_count; + } + +@@ -503,9 +520,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, + } + + /* The condition that must be true for polling to end */ +-static bool wait_is_over(struct port *port) ++static bool will_read_block(struct port *port) ++{ ++ return !port_has_data(port) && port->host_connected; ++} ++ ++static bool will_write_block(struct port *port) + { +- return port_has_data(port) || !port->host_connected; ++ bool ret; ++ ++ if (!port->host_connected) ++ return true; ++ ++ spin_lock_irq(&port->outvq_lock); ++ /* ++ * Check if the Host has consumed any buffers since we last ++ * sent data (this is only applicable for nonblocking ports). 
++ */ ++ reclaim_consumed_buffers(port); ++ ret = port->outvq_full; ++ spin_unlock_irq(&port->outvq_lock); ++ ++ return ret; + } + + static ssize_t port_fops_read(struct file *filp, char __user *ubuf, +@@ -528,7 +564,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, + return -EAGAIN; + + ret = wait_event_interruptible(port->waitqueue, +- wait_is_over(port)); ++ !will_read_block(port)); + if (ret < 0) + return ret; + } +@@ -554,9 +590,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, + struct port *port; + char *buf; + ssize_t ret; ++ bool nonblock; + + port = filp->private_data; + ++ nonblock = filp->f_flags & O_NONBLOCK; ++ ++ if (will_write_block(port)) { ++ if (nonblock) ++ return -EAGAIN; ++ ++ ret = wait_event_interruptible(port->waitqueue, ++ !will_write_block(port)); ++ if (ret < 0) ++ return ret; ++ } ++ + count = min((size_t)(32 * 1024), count); + + buf = kmalloc(count, GFP_KERNEL); +@@ -569,9 +618,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, + goto free_buf; + } + +- ret = send_buf(port, buf, count); ++ ret = send_buf(port, buf, count, nonblock); ++ ++ if (nonblock && ret > 0) ++ goto out; ++ + free_buf: + kfree(buf); ++out: + return ret; + } + +@@ -586,7 +640,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) + ret = 0; + if (port->inbuf) + ret |= POLLIN | POLLRDNORM; +- if (port->host_connected) ++ if (!will_write_block(port)) + ret |= POLLOUT; + if (!port->host_connected) + ret |= POLLHUP; +@@ -610,6 +664,10 @@ static int port_fops_release(struct inode *inode, struct file *filp) + + spin_unlock_irq(&port->inbuf_lock); + ++ spin_lock_irq(&port->outvq_lock); ++ reclaim_consumed_buffers(port); ++ spin_unlock_irq(&port->outvq_lock); ++ + return 0; + } + +@@ -638,6 +696,15 @@ static int port_fops_open(struct inode *inode, struct file *filp) + port->guest_connected = true; + spin_unlock_irq(&port->inbuf_lock); + ++ 
spin_lock_irq(&port->outvq_lock); ++ /* ++ * There might be a chance that we missed reclaiming a few ++ * buffers in the window of the port getting previously closed ++ * and opening now. ++ */ ++ reclaim_consumed_buffers(port); ++ spin_unlock_irq(&port->outvq_lock); ++ + /* Notify host of port being opened */ + send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); + +@@ -676,9 +743,9 @@ static int put_chars(u32 vtermno, const char *buf, int count) + + port = find_port_by_vtermno(vtermno); + if (!port) +- return 0; ++ return -EPIPE; + +- return send_buf(port, (void *)buf, count); ++ return send_buf(port, (void *)buf, count, false); + } + + /* +@@ -692,9 +759,13 @@ static int get_chars(u32 vtermno, char *buf, int count) + { + struct port *port; + ++ /* If we've not set up the port yet, we have no input to give. */ ++ if (unlikely(early_put_chars)) ++ return 0; ++ + port = find_port_by_vtermno(vtermno); + if (!port) +- return 0; ++ return -EPIPE; + + /* If we don't have an input queue yet, we can't get input. 
*/ + BUG_ON(!port->in_vq); +@@ -705,22 +776,14 @@ static int get_chars(u32 vtermno, char *buf, int count) + static void resize_console(struct port *port) + { + struct virtio_device *vdev; +- struct winsize ws; + + /* The port could have been hot-unplugged */ +- if (!port) ++ if (!port || !is_console_port(port)) + return; + + vdev = port->portdev->vdev; +- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { +- vdev->config->get(vdev, +- offsetof(struct virtio_console_config, cols), +- &ws.ws_col, sizeof(u16)); +- vdev->config->get(vdev, +- offsetof(struct virtio_console_config, rows), +- &ws.ws_row, sizeof(u16)); +- hvc_resize(port->cons.hvc, ws); +- } ++ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) ++ hvc_resize(port->cons.hvc, port->cons.ws); + } + + /* We set the configuration at this point, since we now have a tty */ +@@ -804,6 +867,13 @@ int init_port_console(struct port *port) + spin_unlock_irq(&pdrvdata_lock); + port->guest_connected = true; + ++ /* ++ * Start using the new console output if this is the first ++ * console to come up. ++ */ ++ if (early_put_chars) ++ early_put_chars = NULL; ++ + /* Notify host of port being opened */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); + +@@ -859,6 +929,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "host_connected: %d\n", port->host_connected); + out_offset += snprintf(buf + out_offset, out_count - out_offset, ++ "outvq_full: %d\n", port->outvq_full); ++ out_offset += snprintf(buf + out_offset, out_count - out_offset, + "is_console: %s\n", + is_console_port(port) ? 
"yes" : "no"); + out_offset += snprintf(buf + out_offset, out_count - out_offset, +@@ -875,6 +947,153 @@ static const struct file_operations port_debugfs_ops = { + .read = debugfs_read, + }; + ++static void set_console_size(struct port *port, u16 rows, u16 cols) ++{ ++ if (!port || !is_console_port(port)) ++ return; ++ ++ port->cons.ws.ws_row = rows; ++ port->cons.ws.ws_col = cols; ++} ++ ++static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) ++{ ++ struct port_buffer *buf; ++ unsigned int nr_added_bufs; ++ int ret; ++ ++ nr_added_bufs = 0; ++ do { ++ buf = alloc_buf(PAGE_SIZE); ++ if (!buf) ++ break; ++ ++ spin_lock_irq(lock); ++ ret = add_inbuf(vq, buf); ++ if (ret < 0) { ++ spin_unlock_irq(lock); ++ free_buf(buf); ++ break; ++ } ++ nr_added_bufs++; ++ spin_unlock_irq(lock); ++ } while (ret > 0); ++ ++ return nr_added_bufs; ++} ++ ++static int add_port(struct ports_device *portdev, u32 id) ++{ ++ char debugfs_name[16]; ++ struct port *port; ++ struct port_buffer *buf; ++ dev_t devt; ++ unsigned int nr_added_bufs; ++ int err; ++ ++ port = kmalloc(sizeof(*port), GFP_KERNEL); ++ if (!port) { ++ err = -ENOMEM; ++ goto fail; ++ } ++ ++ port->portdev = portdev; ++ port->id = id; ++ ++ port->name = NULL; ++ port->inbuf = NULL; ++ port->cons.hvc = NULL; ++ ++ port->cons.ws.ws_row = port->cons.ws.ws_col = 0; ++ ++ port->host_connected = port->guest_connected = false; ++ ++ port->outvq_full = false; ++ ++ port->in_vq = portdev->in_vqs[port->id]; ++ port->out_vq = portdev->out_vqs[port->id]; ++ ++ cdev_init(&port->cdev, &port_fops); ++ ++ devt = MKDEV(portdev->chr_major, id); ++ err = cdev_add(&port->cdev, devt, 1); ++ if (err < 0) { ++ dev_err(&port->portdev->vdev->dev, ++ "Error %d adding cdev for port %u\n", err, id); ++ goto free_port; ++ } ++ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, ++ devt, port, "vport%up%u", ++ port->portdev->drv_index, id); ++ if (IS_ERR(port->dev)) { ++ err = PTR_ERR(port->dev); ++ 
dev_err(&port->portdev->vdev->dev, ++ "Error %d creating device for port %u\n", ++ err, id); ++ goto free_cdev; ++ } ++ ++ spin_lock_init(&port->inbuf_lock); ++ spin_lock_init(&port->outvq_lock); ++ init_waitqueue_head(&port->waitqueue); ++ ++ /* Fill the in_vq with buffers so the host can send us data. */ ++ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); ++ if (!nr_added_bufs) { ++ dev_err(port->dev, "Error allocating inbufs\n"); ++ err = -ENOMEM; ++ goto free_device; ++ } ++ ++ /* ++ * If we're not using multiport support, this has to be a console port ++ */ ++ if (!use_multiport(port->portdev)) { ++ err = init_port_console(port); ++ if (err) ++ goto free_inbufs; ++ } ++ ++ spin_lock_irq(&portdev->ports_lock); ++ list_add_tail(&port->list, &port->portdev->ports); ++ spin_unlock_irq(&portdev->ports_lock); ++ ++ /* ++ * Tell the Host we're set so that it can send us various ++ * configuration parameters for this port (eg, port name, ++ * caching, whether this is a console port, etc.) ++ */ ++ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); ++ ++ if (pdrvdata.debugfs_dir) { ++ /* ++ * Finally, create the debugfs file that we can use to ++ * inspect a port's state at any time ++ */ ++ sprintf(debugfs_name, "vport%up%u", ++ port->portdev->drv_index, id); ++ port->debugfs_file = debugfs_create_file(debugfs_name, 0444, ++ pdrvdata.debugfs_dir, ++ port, ++ &port_debugfs_ops); ++ } ++ return 0; ++ ++free_inbufs: ++ while ((buf = virtqueue_detach_unused_buf(port->in_vq))) ++ free_buf(buf); ++free_device: ++ device_destroy(pdrvdata.class, port->dev->devt); ++free_cdev: ++ cdev_del(&port->cdev); ++free_port: ++ kfree(port); ++fail: ++ /* The host might want to notify management sw about port add failure */ ++ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); ++ return err; ++} ++ + /* Remove all port-specific data. 
*/ + static int remove_port(struct port *port) + { +@@ -888,7 +1107,18 @@ static int remove_port(struct port *port) + spin_lock_irq(&pdrvdata_lock); + list_del(&port->cons.list); + spin_unlock_irq(&pdrvdata_lock); ++#if 0 ++ /* ++ * hvc_remove() not called as removing one hvc port ++ * results in other hvc ports getting frozen. ++ * ++ * Once this is resolved in hvc, this functionality ++ * will be enabled. Till that is done, the -EPIPE ++ * return from get_chars() above will help ++ * hvc_console.c to clean up on ports we remove here. ++ */ + hvc_remove(port->cons.hvc); ++#endif + } + if (port->guest_connected) + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); +@@ -900,6 +1130,8 @@ static int remove_port(struct port *port) + /* Remove unused data this port might have received. */ + discard_port_data(port); + ++ reclaim_consumed_buffers(port); ++ + /* Remove buffers we queued up for the Host to send us data in. */ + while ((buf = virtqueue_detach_unused_buf(port->in_vq))) + free_buf(buf); +@@ -924,7 +1156,7 @@ static void handle_control_message(struct ports_device *portdev, + cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); + + port = find_port_by_id(portdev, cpkt->id); +- if (!port) { ++ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { + /* No valid header at start of buffer. Drop it. */ + dev_dbg(&portdev->vdev->dev, + "Invalid index %u in control packet\n", cpkt->id); +@@ -932,6 +1164,24 @@ static void handle_control_message(struct ports_device *portdev, + } + + switch (cpkt->event) { ++ case VIRTIO_CONSOLE_PORT_ADD: ++ if (port) { ++ dev_dbg(&portdev->vdev->dev, ++ "Port %u already added\n", port->id); ++ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); ++ break; ++ } ++ if (cpkt->id >= portdev->config.max_nr_ports) { ++ dev_warn(&portdev->vdev->dev, ++ "Request for adding port with out-of-bound id %u, max. 
supported id: %u\n", ++ cpkt->id, portdev->config.max_nr_ports - 1); ++ break; ++ } ++ add_port(portdev, cpkt->id); ++ break; ++ case VIRTIO_CONSOLE_PORT_REMOVE: ++ remove_port(port); ++ break; + case VIRTIO_CONSOLE_CONSOLE_PORT: + if (!cpkt->value) + break; +@@ -944,15 +1194,34 @@ static void handle_control_message(struct ports_device *portdev, + * have to notify the host first. + */ + break; +- case VIRTIO_CONSOLE_RESIZE: ++ case VIRTIO_CONSOLE_RESIZE: { ++ struct { ++ __u16 rows; ++ __u16 cols; ++ } size; ++ + if (!is_console_port(port)) + break; ++ ++ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), ++ sizeof(size)); ++ set_console_size(port, size.rows, size.cols); ++ + port->cons.hvc->irq_requested = 1; + resize_console(port); + break; ++ } + case VIRTIO_CONSOLE_PORT_OPEN: + port->host_connected = cpkt->value; + wake_up_interruptible(&port->waitqueue); ++ /* ++ * If the host port got closed and the host had any ++ * unconsumed buffers, we'll be able to reclaim them ++ * now. ++ */ ++ spin_lock_irq(&port->outvq_lock); ++ reclaim_consumed_buffers(port); ++ spin_unlock_irq(&port->outvq_lock); + break; + case VIRTIO_CONSOLE_PORT_NAME: + /* +@@ -990,32 +1259,6 @@ static void handle_control_message(struct ports_device *portdev, + kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); + } + break; +- case VIRTIO_CONSOLE_PORT_REMOVE: +- /* +- * Hot unplug the port. We don't decrement nr_ports +- * since we don't want to deal with extra complexities +- * of using the lowest-available port id: We can just +- * pick up the nr_ports number as the id and not have +- * userspace send it to us. This helps us in two +- * ways: +- * +- * - We don't need to have a 'port_id' field in the +- * config space when a port is hot-added. This is a +- * good thing as we might queue up multiple hotplug +- * requests issued in our workqueue. 
+- * +- * - Another way to deal with this would have been to +- * use a bitmap of the active ports and select the +- * lowest non-active port from that map. That +- * bloats the already tight config space and we +- * would end up artificially limiting the +- * max. number of ports to sizeof(bitmap). Right +- * now we can support 2^32 ports (as the port id is +- * stored in a u32 type). +- * +- */ +- remove_port(port); +- break; + } + } + +@@ -1092,204 +1335,29 @@ static void config_intr(struct virtio_device *vdev) + struct ports_device *portdev; + + portdev = vdev->priv; +- if (use_multiport(portdev)) { +- /* Handle port hot-add */ +- schedule_work(&portdev->config_work); +- } +- /* +- * We'll use this way of resizing only for legacy support. +- * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use +- * control messages to indicate console size changes so that +- * it can be done per-port +- */ +- resize_console(find_port_by_id(portdev, 0)); +-} + +-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) +-{ +- struct port_buffer *buf; +- unsigned int nr_added_bufs; +- int ret; +- +- nr_added_bufs = 0; +- do { +- buf = alloc_buf(PAGE_SIZE); +- if (!buf) +- break; +- +- spin_lock_irq(lock); +- ret = add_inbuf(vq, buf); +- if (ret < 0) { +- spin_unlock_irq(lock); +- free_buf(buf); +- break; +- } +- nr_added_bufs++; +- spin_unlock_irq(lock); +- } while (ret > 0); +- +- return nr_added_bufs; +-} +- +-static int add_port(struct ports_device *portdev, u32 id) +-{ +- char debugfs_name[16]; +- struct port *port; +- struct port_buffer *buf; +- dev_t devt; +- unsigned int nr_added_bufs; +- int err; +- +- port = kmalloc(sizeof(*port), GFP_KERNEL); +- if (!port) { +- err = -ENOMEM; +- goto fail; +- } +- +- port->portdev = portdev; +- port->id = id; +- +- port->name = NULL; +- port->inbuf = NULL; +- port->cons.hvc = NULL; +- +- port->host_connected = port->guest_connected = false; +- +- port->in_vq = portdev->in_vqs[port->id]; +- port->out_vq = 
portdev->out_vqs[port->id]; +- +- cdev_init(&port->cdev, &port_fops); +- +- devt = MKDEV(portdev->chr_major, id); +- err = cdev_add(&port->cdev, devt, 1); +- if (err < 0) { +- dev_err(&port->portdev->vdev->dev, +- "Error %d adding cdev for port %u\n", err, id); +- goto free_port; +- } +- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, +- devt, port, "vport%up%u", +- port->portdev->drv_index, id); +- if (IS_ERR(port->dev)) { +- err = PTR_ERR(port->dev); +- dev_err(&port->portdev->vdev->dev, +- "Error %d creating device for port %u\n", +- err, id); +- goto free_cdev; +- } +- +- spin_lock_init(&port->inbuf_lock); +- init_waitqueue_head(&port->waitqueue); +- +- /* Fill the in_vq with buffers so the host can send us data. */ +- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); +- if (!nr_added_bufs) { +- dev_err(port->dev, "Error allocating inbufs\n"); +- err = -ENOMEM; +- goto free_device; +- } +- +- /* +- * If we're not using multiport support, this has to be a console port +- */ +- if (!use_multiport(port->portdev)) { +- err = init_port_console(port); +- if (err) +- goto free_inbufs; +- } +- +- spin_lock_irq(&portdev->ports_lock); +- list_add_tail(&port->list, &port->portdev->ports); +- spin_unlock_irq(&portdev->ports_lock); +- +- /* +- * Tell the Host we're set so that it can send us various +- * configuration parameters for this port (eg, port name, +- * caching, whether this is a console port, etc.) 
+- */ +- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); +- +- if (pdrvdata.debugfs_dir) { +- /* +- * Finally, create the debugfs file that we can use to +- * inspect a port's state at any time +- */ +- sprintf(debugfs_name, "vport%up%u", +- port->portdev->drv_index, id); +- port->debugfs_file = debugfs_create_file(debugfs_name, 0444, +- pdrvdata.debugfs_dir, +- port, +- &port_debugfs_ops); +- } +- return 0; +- +-free_inbufs: +- while ((buf = virtqueue_detach_unused_buf(port->in_vq))) +- free_buf(buf); +-free_device: +- device_destroy(pdrvdata.class, port->dev->devt); +-free_cdev: +- cdev_del(&port->cdev); +-free_port: +- kfree(port); +-fail: +- return err; +-} +- +-/* +- * The workhandler for config-space updates. +- * +- * This is called when ports are hot-added. +- */ +-static void config_work_handler(struct work_struct *work) +-{ +- struct virtio_console_multiport_conf virtconconf; +- struct ports_device *portdev; +- struct virtio_device *vdev; +- int err; ++ if (!use_multiport(portdev)) { ++ struct port *port; ++ u16 rows, cols; + +- portdev = container_of(work, struct ports_device, config_work); ++ vdev->config->get(vdev, ++ offsetof(struct virtio_console_config, cols), ++ &cols, sizeof(u16)); ++ vdev->config->get(vdev, ++ offsetof(struct virtio_console_config, rows), ++ &rows, sizeof(u16)); + +- vdev = portdev->vdev; +- vdev->config->get(vdev, +- offsetof(struct virtio_console_multiport_conf, +- nr_ports), +- &virtconconf.nr_ports, +- sizeof(virtconconf.nr_ports)); ++ port = find_port_by_id(portdev, 0); ++ set_console_size(port, rows, cols); + +- if (portdev->config.nr_ports == virtconconf.nr_ports) { + /* +- * Port 0 got hot-added. Since we already did all the +- * other initialisation for it, just tell the Host +- * that the port is ready if we find the port. In +- * case the port was hot-removed earlier, we call +- * add_port to add the port. ++ * We'll use this way of resizing only for legacy ++ * support. 
For newer userspace ++ * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages ++ * to indicate console size changes so that it can be ++ * done per-port. + */ +- struct port *port; +- +- port = find_port_by_id(portdev, 0); +- if (!port) +- add_port(portdev, 0); +- else +- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); +- return; +- } +- if (virtconconf.nr_ports > portdev->config.max_nr_ports) { +- dev_warn(&vdev->dev, +- "More ports specified (%u) than allowed (%u)", +- portdev->config.nr_ports + 1, +- portdev->config.max_nr_ports); +- return; +- } +- if (virtconconf.nr_ports < portdev->config.nr_ports) +- return; +- +- /* Hot-add ports */ +- while (virtconconf.nr_ports - portdev->config.nr_ports) { +- err = add_port(portdev, portdev->config.nr_ports); +- if (err) +- break; +- portdev->config.nr_ports++; ++ resize_console(port); + } + } + +@@ -1414,7 +1482,6 @@ static const struct file_operations portdev_fops = { + static int __devinit virtcons_probe(struct virtio_device *vdev) + { + struct ports_device *portdev; +- u32 i; + int err; + bool multiport; + +@@ -1443,37 +1510,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) + } + + multiport = false; +- portdev->config.nr_ports = 1; + portdev->config.max_nr_ports = 1; +-#if 0 /* Multiport is not quite ready yet --RR */ + if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { + multiport = true; + vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; + +- vdev->config->get(vdev, +- offsetof(struct virtio_console_multiport_conf, +- nr_ports), +- &portdev->config.nr_ports, +- sizeof(portdev->config.nr_ports)); +- vdev->config->get(vdev, +- offsetof(struct virtio_console_multiport_conf, +- max_nr_ports), ++ vdev->config->get(vdev, offsetof(struct virtio_console_config, ++ max_nr_ports), + &portdev->config.max_nr_ports, + sizeof(portdev->config.max_nr_ports)); +- if (portdev->config.nr_ports > portdev->config.max_nr_ports) { +- dev_warn(&vdev->dev, +- "More ports (%u) specified than allowed 
(%u). Will init %u ports.", +- portdev->config.nr_ports, +- portdev->config.max_nr_ports, +- portdev->config.max_nr_ports); +- +- portdev->config.nr_ports = portdev->config.max_nr_ports; +- } + } + + /* Let the Host know we support multiple ports.*/ + vdev->config->finalize_features(vdev); +-#endif + + err = init_vqs(portdev); + if (err < 0) { +@@ -1489,7 +1538,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) + + spin_lock_init(&portdev->cvq_lock); + INIT_WORK(&portdev->control_work, &control_work_handler); +- INIT_WORK(&portdev->config_work, &config_work_handler); + + nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); + if (!nr_added_bufs) { +@@ -1498,16 +1546,22 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) + err = -ENOMEM; + goto free_vqs; + } ++ } else { ++ /* ++ * For backward compatibility: Create a console port ++ * if we're running on older host. ++ */ ++ add_port(portdev, 0); + } + +- for (i = 0; i < portdev->config.nr_ports; i++) +- add_port(portdev, i); +- +- /* Start using the new console output. 
*/ +- early_put_chars = NULL; ++ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, ++ VIRTIO_CONSOLE_DEVICE_READY, 1); + return 0; + + free_vqs: ++ /* The host might want to notify mgmt sw about device add failure */ ++ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, ++ VIRTIO_CONSOLE_DEVICE_READY, 0); + vdev->config->del_vqs(vdev); + kfree(portdev->in_vqs); + kfree(portdev->out_vqs); +@@ -1529,7 +1583,6 @@ static void virtcons_remove(struct virtio_device *vdev) + portdev = vdev->priv; + + cancel_work_sync(&portdev->control_work); +- cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + remove_port(port); +@@ -1556,6 +1609,7 @@ static struct virtio_device_id id_table[] = { + + static unsigned int features[] = { + VIRTIO_CONSOLE_F_SIZE, ++ VIRTIO_CONSOLE_F_MULTIPORT, + }; + + static struct virtio_driver virtio_console = { +diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h +index 92228a8..a85064d 100644 +--- a/include/linux/virtio_console.h ++++ b/include/linux/virtio_console.h +@@ -12,14 +12,39 @@ + + /* Feature bits */ + #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ ++#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ ++ ++#define VIRTIO_CONSOLE_BAD_ID (~(u32)0) + + struct virtio_console_config { + /* colums of the screens */ + __u16 cols; + /* rows of the screens */ + __u16 rows; ++ /* max. number of ports this device can hold */ ++ __u32 max_nr_ports; + } __attribute__((packed)); + ++/* ++ * A message that's passed between the Host and the Guest for a ++ * particular port. 
++ */ ++struct virtio_console_control { ++ __u32 id; /* Port number */ ++ __u16 event; /* The kind of control event (see below) */ ++ __u16 value; /* Extra information for the key */ ++}; ++ ++/* Some events for control messages */ ++#define VIRTIO_CONSOLE_DEVICE_READY 0 ++#define VIRTIO_CONSOLE_PORT_ADD 1 ++#define VIRTIO_CONSOLE_PORT_REMOVE 2 ++#define VIRTIO_CONSOLE_PORT_READY 3 ++#define VIRTIO_CONSOLE_CONSOLE_PORT 4 ++#define VIRTIO_CONSOLE_RESIZE 5 ++#define VIRTIO_CONSOLE_PORT_OPEN 6 ++#define VIRTIO_CONSOLE_PORT_NAME 7 ++ + #ifdef __KERNEL__ + int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); + #endif /* __KERNEL__ */ diff --git a/virtqueue-wrappers.patch b/virtqueue-wrappers.patch new file mode 100644 index 000000000..217570330 --- /dev/null +++ b/virtqueue-wrappers.patch @@ -0,0 +1,651 @@ +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c +index e32b24b..83fa09a 100644 +--- a/drivers/block/virtio_blk.c ++++ b/drivers/block/virtio_blk.c +@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq) + unsigned long flags; + + spin_lock_irqsave(&vblk->lock, flags); +- while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { ++ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { + int error; + + switch (vbr->status) { +@@ -158,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, + } + } + +- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { ++ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { + mempool_free(vbr, vblk->pool); + return false; + } +@@ -187,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q) + } + + if (issued) +- vblk->vq->vq_ops->kick(vblk->vq); ++ virtqueue_kick(vblk->vq); + } + + static void virtblk_prepare_flush(struct request_queue *q, struct request *req) +diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c +index 64fe0a7..75f1cbd 100644 +--- 
a/drivers/char/hw_random/virtio-rng.c ++++ b/drivers/char/hw_random/virtio-rng.c +@@ -32,7 +32,7 @@ static bool busy; + static void random_recv_done(struct virtqueue *vq) + { + /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ +- if (!vq->vq_ops->get_buf(vq, &data_avail)) ++ if (!virtqueue_get_buf(vq, &data_avail)) + return; + + complete(&have_data); +@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size) + sg_init_one(&sg, buf, size); + + /* There should always be room for one buffer. */ +- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0) ++ if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0) + BUG(); + +- vq->vq_ops->kick(vq); ++ virtqueue_kick(vq); + } + + static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 196428c..48ce834 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -328,7 +328,7 @@ static void *get_inbuf(struct port *port) + unsigned int len; + + vq = port->in_vq; +- buf = vq->vq_ops->get_buf(vq, &len); ++ buf = virtqueue_get_buf(vq, &len); + if (buf) { + buf->len = len; + buf->offset = 0; +@@ -349,8 +349,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) + + sg_init_one(sg, buf->buf, buf->size); + +- ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); +- vq->vq_ops->kick(vq); ++ ret = virtqueue_add_buf(vq, sg, 0, 1, buf); ++ virtqueue_kick(vq); + return ret; + } + +@@ -366,7 +366,7 @@ static void discard_port_data(struct port *port) + if (port->inbuf) + buf = port->inbuf; + else +- buf = vq->vq_ops->get_buf(vq, &len); ++ buf = virtqueue_get_buf(vq, &len); + + ret = 0; + while (buf) { +@@ -374,7 +374,7 @@ static void discard_port_data(struct port *port) + ret++; + free_buf(buf); + } +- buf = vq->vq_ops->get_buf(vq, &len); ++ buf = virtqueue_get_buf(vq, &len); + } + port->inbuf = NULL; + if (ret) +@@ -421,9 +421,9 @@ static ssize_t send_control_msg(struct port *port, 
unsigned int event, + vq = port->portdev->c_ovq; + + sg_init_one(sg, &cpkt, sizeof(cpkt)); +- if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { +- vq->vq_ops->kick(vq); +- while (!vq->vq_ops->get_buf(vq, &len)) ++ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) { ++ virtqueue_kick(vq); ++ while (!virtqueue_get_buf(vq, &len)) + cpu_relax(); + } + return 0; +@@ -439,10 +439,10 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) + out_vq = port->out_vq; + + sg_init_one(sg, in_buf, in_count); +- ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); ++ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf); + + /* Tell Host to go! */ +- out_vq->vq_ops->kick(out_vq); ++ virtqueue_kick(out_vq); + + if (ret < 0) { + in_count = 0; +@@ -450,7 +450,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) + } + + /* Wait till the host acknowledges it pushed out the data we sent. */ +- while (!out_vq->vq_ops->get_buf(out_vq, &len)) ++ while (!virtqueue_get_buf(out_vq, &len)) + cpu_relax(); + fail: + /* We're expected to return the amount of data we wrote */ +@@ -901,7 +901,7 @@ static int remove_port(struct port *port) + discard_port_data(port); + + /* Remove buffers we queued up for the Host to send us data in. 
*/ +- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) ++ while ((buf = virtqueue_detach_unused_buf(port->in_vq))) + free_buf(buf); + + kfree(port->name); +@@ -1030,7 +1030,7 @@ static void control_work_handler(struct work_struct *work) + vq = portdev->c_ivq; + + spin_lock(&portdev->cvq_lock); +- while ((buf = vq->vq_ops->get_buf(vq, &len))) { ++ while ((buf = virtqueue_get_buf(vq, &len))) { + spin_unlock(&portdev->cvq_lock); + + buf->len = len; +@@ -1224,7 +1224,7 @@ static int add_port(struct ports_device *portdev, u32 id) + return 0; + + free_inbufs: +- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) ++ while ((buf = virtqueue_detach_unused_buf(port->in_vq))) + free_buf(buf); + free_device: + device_destroy(pdrvdata.class, port->dev->devt); +@@ -1536,10 +1536,10 @@ static void virtcons_remove(struct virtio_device *vdev) + + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); + +- while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) ++ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) + free_buf(buf); + +- while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) ++ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) + free_buf(buf); + + vdev->config->del_vqs(vdev); +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index b0577dd..91738d8 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -119,7 +119,7 @@ static void skb_xmit_done(struct virtqueue *svq) + struct virtnet_info *vi = svq->vdev->priv; + + /* Suppress further interrupts. */ +- svq->vq_ops->disable_cb(svq); ++ virtqueue_disable_cb(svq); + + /* We were probably waiting for more output buffers. 
*/ + netif_wake_queue(vi->dev); +@@ -207,7 +207,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb) + return -EINVAL; + } + +- page = vi->rvq->vq_ops->get_buf(vi->rvq, &len); ++ page = virtqueue_get_buf(vi->rvq, &len); + if (!page) { + pr_debug("%s: rx error: %d buffers missing\n", + skb->dev->name, hdr->mhdr.num_buffers); +@@ -339,7 +339,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) + + skb_to_sgvec(skb, sg + 1, 0, skb->len); + +- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb); ++ err = virtqueue_add_buf(vi->rvq, sg, 0, 2, skb); + if (err < 0) + dev_kfree_skb(skb); + +@@ -386,7 +386,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) + + /* chain first in list head */ + first->private = (unsigned long)list; +- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2, ++ err = virtqueue_add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2, + first); + if (err < 0) + give_pages(vi, first); +@@ -406,7 +406,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) + + sg_init_one(&sg, page_address(page), PAGE_SIZE); + +- err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page); ++ err = virtqueue_add_buf(vi->rvq, &sg, 0, 1, page); + if (err < 0) + give_pages(vi, page); + +@@ -435,7 +435,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) + } while (err > 0); + if (unlikely(vi->num > vi->max)) + vi->max = vi->num; +- vi->rvq->vq_ops->kick(vi->rvq); ++ virtqueue_kick(vi->rvq); + return !oom; + } + +@@ -444,7 +444,7 @@ static void skb_recv_done(struct virtqueue *rvq) + struct virtnet_info *vi = rvq->vdev->priv; + /* Schedule NAPI, Suppress further interrupts if successful. 
*/ + if (napi_schedule_prep(&vi->napi)) { +- rvq->vq_ops->disable_cb(rvq); ++ virtqueue_disable_cb(rvq); + __napi_schedule(&vi->napi); + } + } +@@ -473,7 +473,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) + + again: + while (received < budget && +- (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) { ++ (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) { + receive_buf(vi->dev, buf, len); + --vi->num; + received++; +@@ -487,9 +487,9 @@ again: + /* Out of packets? */ + if (received < budget) { + napi_complete(napi); +- if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) && ++ if (unlikely(!virtqueue_enable_cb(vi->rvq)) && + napi_schedule_prep(napi)) { +- vi->rvq->vq_ops->disable_cb(vi->rvq); ++ virtqueue_disable_cb(vi->rvq); + __napi_schedule(napi); + goto again; + } +@@ -503,7 +503,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) + struct sk_buff *skb; + unsigned int len, tot_sgs = 0; + +- while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { ++ while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { + pr_debug("Sent skb %p\n", skb); + vi->dev->stats.tx_bytes += skb->len; + vi->dev->stats.tx_packets++; +@@ -559,7 +559,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) + sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); + + hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; +- return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb); ++ return virtqueue_add_buf(vi->svq, sg, hdr->num_sg, 0, skb); + } + + static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) +@@ -578,14 +578,14 @@ again: + if (unlikely(capacity < 0)) { + netif_stop_queue(dev); + dev_warn(&dev->dev, "Unexpected full queue\n"); +- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { +- vi->svq->vq_ops->disable_cb(vi->svq); ++ if (unlikely(!virtqueue_enable_cb(vi->svq))) { ++ virtqueue_disable_cb(vi->svq); + netif_start_queue(dev); + goto again; + } + return NETDEV_TX_BUSY; + } +- 
vi->svq->vq_ops->kick(vi->svq); ++ virtqueue_kick(vi->svq); + + /* Don't wait up for transmitted skbs to be freed. */ + skb_orphan(skb); +@@ -595,12 +595,12 @@ again: + * before it gets out of hand. Naturally, this wastes entries. */ + if (capacity < 2+MAX_SKB_FRAGS) { + netif_stop_queue(dev); +- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { ++ if (unlikely(!virtqueue_enable_cb(vi->svq))) { + /* More just got used, free them then recheck. */ + capacity += free_old_xmit_skbs(vi); + if (capacity >= 2+MAX_SKB_FRAGS) { + netif_start_queue(dev); +- vi->svq->vq_ops->disable_cb(vi->svq); ++ virtqueue_disable_cb(vi->svq); + } + } + } +@@ -645,7 +645,7 @@ static int virtnet_open(struct net_device *dev) + * now. virtnet_poll wants re-enable the queue, so we disable here. + * We synchronize against interrupts via NAPI_STATE_SCHED */ + if (napi_schedule_prep(&vi->napi)) { +- vi->rvq->vq_ops->disable_cb(vi->rvq); ++ virtqueue_disable_cb(vi->rvq); + __napi_schedule(&vi->napi); + } + return 0; +@@ -682,15 +682,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, + sg_set_buf(&sg[i + 1], sg_virt(s), s->length); + sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); + +- BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0); ++ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0); + +- vi->cvq->vq_ops->kick(vi->cvq); ++ virtqueue_kick(vi->cvq); + + /* + * Spin for a response, the kick causes an ioport write, trapping + * into the hypervisor, so the request should be handled immediately. 
+ */ +- while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp)) ++ while (!virtqueue_get_buf(vi->cvq, &tmp)) + cpu_relax(); + + return status == VIRTIO_NET_OK; +@@ -1006,13 +1006,13 @@ static void free_unused_bufs(struct virtnet_info *vi) + { + void *buf; + while (1) { +- buf = vi->svq->vq_ops->detach_unused_buf(vi->svq); ++ buf = virtqueue_detach_unused_buf(vi->svq); + if (!buf) + break; + dev_kfree_skb(buf); + } + while (1) { +- buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq); ++ buf = virtqueue_detach_unused_buf(vi->rvq); + if (!buf) + break; + if (vi->mergeable_rx_bufs || vi->big_packets) +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c +index bfec7c2..0f1da45 100644 +--- a/drivers/virtio/virtio_balloon.c ++++ b/drivers/virtio/virtio_balloon.c +@@ -75,7 +75,7 @@ static void balloon_ack(struct virtqueue *vq) + struct virtio_balloon *vb; + unsigned int len; + +- vb = vq->vq_ops->get_buf(vq, &len); ++ vb = virtqueue_get_buf(vq, &len); + if (vb) + complete(&vb->acked); + } +@@ -89,9 +89,9 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) + init_completion(&vb->acked); + + /* We should always be able to add one buffer to an empty queue. 
*/ +- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) ++ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0) + BUG(); +- vq->vq_ops->kick(vq); ++ virtqueue_kick(vq); + + /* When host has read buffer, this completes via balloon_ack */ + wait_for_completion(&vb->acked); +@@ -204,7 +204,7 @@ static void stats_request(struct virtqueue *vq) + struct virtio_balloon *vb; + unsigned int len; + +- vb = vq->vq_ops->get_buf(vq, &len); ++ vb = virtqueue_get_buf(vq, &len); + if (!vb) + return; + vb->need_stats_update = 1; +@@ -221,9 +221,9 @@ static void stats_handle_request(struct virtio_balloon *vb) + + vq = vb->stats_vq; + sg_init_one(&sg, vb->stats, sizeof(vb->stats)); +- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) ++ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0) + BUG(); +- vq->vq_ops->kick(vq); ++ virtqueue_kick(vq); + } + + static void virtballoon_changed(struct virtio_device *vdev) +@@ -314,10 +314,9 @@ static int virtballoon_probe(struct virtio_device *vdev) + * use it to signal us later. + */ + sg_init_one(&sg, vb->stats, sizeof vb->stats); +- if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq, +- &sg, 1, 0, vb) < 0) ++ if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0) + BUG(); +- vb->stats_vq->vq_ops->kick(vb->stats_vq); ++ virtqueue_kick(vb->stats_vq); + } + + vb->thread = kthread_run(balloon, vb, "vballoon"); +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index 0f90634..0717b5b 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -155,11 +155,11 @@ static int vring_add_indirect(struct vring_virtqueue *vq, + return head; + } + +-static int vring_add_buf(struct virtqueue *_vq, +- struct scatterlist sg[], +- unsigned int out, +- unsigned int in, +- void *data) ++int virtqueue_add_buf(struct virtqueue *_vq, ++ struct scatterlist sg[], ++ unsigned int out, ++ unsigned int in, ++ void *data) + { + struct vring_virtqueue *vq = to_vvq(_vq); + unsigned int i, avail, head, uninitialized_var(prev); +@@ -232,8 +232,9 @@ 
add_head: + return vq->num_free ? vq->vring.num : 0; + return vq->num_free; + } ++EXPORT_SYMBOL_GPL(virtqueue_add_buf); + +-static void vring_kick(struct virtqueue *_vq) ++void virtqueue_kick(struct virtqueue *_vq) + { + struct vring_virtqueue *vq = to_vvq(_vq); + START_USE(vq); +@@ -253,6 +254,7 @@ static void vring_kick(struct virtqueue *_vq) + + END_USE(vq); + } ++EXPORT_SYMBOL_GPL(virtqueue_kick); + + static void detach_buf(struct vring_virtqueue *vq, unsigned int head) + { +@@ -284,7 +286,7 @@ static inline bool more_used(const struct vring_virtqueue *vq) + return vq->last_used_idx != vq->vring.used->idx; + } + +-static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len) ++void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) + { + struct vring_virtqueue *vq = to_vvq(_vq); + void *ret; +@@ -325,15 +327,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len) + END_USE(vq); + return ret; + } ++EXPORT_SYMBOL_GPL(virtqueue_get_buf); + +-static void vring_disable_cb(struct virtqueue *_vq) ++void virtqueue_disable_cb(struct virtqueue *_vq) + { + struct vring_virtqueue *vq = to_vvq(_vq); + + vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; + } ++EXPORT_SYMBOL_GPL(virtqueue_disable_cb); + +-static bool vring_enable_cb(struct virtqueue *_vq) ++bool virtqueue_enable_cb(struct virtqueue *_vq) + { + struct vring_virtqueue *vq = to_vvq(_vq); + +@@ -351,8 +355,9 @@ static bool vring_enable_cb(struct virtqueue *_vq) + END_USE(vq); + return true; + } ++EXPORT_SYMBOL_GPL(virtqueue_enable_cb); + +-static void *vring_detach_unused_buf(struct virtqueue *_vq) ++void *virtqueue_detach_unused_buf(struct virtqueue *_vq) + { + struct vring_virtqueue *vq = to_vvq(_vq); + unsigned int i; +@@ -375,6 +380,7 @@ static void *vring_detach_unused_buf(struct virtqueue *_vq) + END_USE(vq); + return NULL; + } ++EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); + + irqreturn_t vring_interrupt(int irq, void *_vq) + { +@@ -396,15 +402,6 @@ irqreturn_t 
vring_interrupt(int irq, void *_vq) + } + EXPORT_SYMBOL_GPL(vring_interrupt); + +-static struct virtqueue_ops vring_vq_ops = { +- .add_buf = vring_add_buf, +- .get_buf = vring_get_buf, +- .kick = vring_kick, +- .disable_cb = vring_disable_cb, +- .enable_cb = vring_enable_cb, +- .detach_unused_buf = vring_detach_unused_buf, +-}; +- + struct virtqueue *vring_new_virtqueue(unsigned int num, + unsigned int vring_align, + struct virtio_device *vdev, +@@ -429,7 +426,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, + vring_init(&vq->vring, num, pages, vring_align); + vq->vq.callback = callback; + vq->vq.vdev = vdev; +- vq->vq.vq_ops = &vring_vq_ops; + vq->vq.name = name; + vq->notify = notify; + vq->broken = false; +diff --git a/include/linux/virtio.h b/include/linux/virtio.h +index 40d1709..5b0fce0 100644 +--- a/include/linux/virtio.h ++++ b/include/linux/virtio.h +@@ -14,7 +14,6 @@ + * @callback: the function to call when buffers are consumed (can be NULL). + * @name: the name of this virtqueue (mainly for debugging) + * @vdev: the virtio device this queue was created for. +- * @vq_ops: the operations for this virtqueue (see below). + * @priv: a pointer for the virtqueue implementation to use. + */ + struct virtqueue { +@@ -22,60 +21,60 @@ struct virtqueue { + void (*callback)(struct virtqueue *vq); + const char *name; + struct virtio_device *vdev; +- struct virtqueue_ops *vq_ops; + void *priv; + }; + + /** +- * virtqueue_ops - operations for virtqueue abstraction layer +- * @add_buf: expose buffer to other end ++ * operations for virtqueue ++ * virtqueue_add_buf: expose buffer to other end + * vq: the struct virtqueue we're talking about. + * sg: the description of the buffer(s). + * out_num: the number of sg readable by other side + * in_num: the number of sg which are writable (after readable ones) + * data: the token identifying the buffer. + * Returns remaining capacity of queue (sg segments) or a negative error. 
+- * @kick: update after add_buf ++ * virtqueue_kick: update after add_buf + * vq: the struct virtqueue + * After one or more add_buf calls, invoke this to kick the other side. +- * @get_buf: get the next used buffer ++ * virtqueue_get_buf: get the next used buffer + * vq: the struct virtqueue we're talking about. + * len: the length written into the buffer + * Returns NULL or the "data" token handed to add_buf. +- * @disable_cb: disable callbacks ++ * virtqueue_disable_cb: disable callbacks + * vq: the struct virtqueue we're talking about. + * Note that this is not necessarily synchronous, hence unreliable and only + * useful as an optimization. +- * @enable_cb: restart callbacks after disable_cb. ++ * virtqueue_enable_cb: restart callbacks after disable_cb. + * vq: the struct virtqueue we're talking about. + * This re-enables callbacks; it returns "false" if there are pending + * buffers in the queue, to detect a possible race between the driver + * checking for more work, and enabling callbacks. +- * @detach_unused_buf: detach first unused buffer ++ * virtqueue_detach_unused_buf: detach first unused buffer + * vq: the struct virtqueue we're talking about. + * Returns NULL or the "data" token handed to add_buf + * + * Locking rules are straightforward: the driver is responsible for + * locking. No two operations may be invoked simultaneously, with the exception +- * of @disable_cb. ++ * of virtqueue_disable_cb. + * + * All operations can be called in any context. 
+ */ +-struct virtqueue_ops { +- int (*add_buf)(struct virtqueue *vq, +- struct scatterlist sg[], +- unsigned int out_num, +- unsigned int in_num, +- void *data); + +- void (*kick)(struct virtqueue *vq); ++int virtqueue_add_buf(struct virtqueue *vq, ++ struct scatterlist sg[], ++ unsigned int out_num, ++ unsigned int in_num, ++ void *data); + +- void *(*get_buf)(struct virtqueue *vq, unsigned int *len); ++void virtqueue_kick(struct virtqueue *vq); + +- void (*disable_cb)(struct virtqueue *vq); +- bool (*enable_cb)(struct virtqueue *vq); +- void *(*detach_unused_buf)(struct virtqueue *vq); +-}; ++void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); ++ ++void virtqueue_disable_cb(struct virtqueue *vq); ++ ++bool virtqueue_enable_cb(struct virtqueue *vq); ++ ++void *virtqueue_detach_unused_buf(struct virtqueue *vq); + + /** + * virtio_device - representation of a device using virtio +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c +index 7eb78ec..dcfbe99 100644 +--- a/net/9p/trans_virtio.c ++++ b/net/9p/trans_virtio.c +@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq) + + P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n"); + +- while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) { ++ while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) { + P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc); + P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); + req = p9_tag_lookup(chan->client, rc->tag); +@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) + + req->status = REQ_STATUS_SENT; + +- if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { ++ if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { + P9_DPRINTK(P9_DEBUG_TRANS, + "9p debug: virtio rpc add_buf returned failure"); + return -EIO; + } + +- chan->vq->vq_ops->kick(chan->vq); ++ virtqueue_kick(chan->vq); + + P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n"); + return 0; diff --git 
a/x86-debug-send-sigtrap-for-user-icebp.patch b/x86-debug-send-sigtrap-for-user-icebp.patch new file mode 100644 index 000000000..376fea20c --- /dev/null +++ b/x86-debug-send-sigtrap-for-user-icebp.patch @@ -0,0 +1,80 @@ +From: Frederic Weisbecker +Date: Wed, 30 Jun 2010 13:09:06 +0000 (+0200) +Subject: x86: Send a SIGTRAP for user icebp traps +X-Git-Tag: v2.6.35-rc4~2^2~2 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=a1e80fafc9f0742a1776a0490258cb64912411b0 + +x86: Send a SIGTRAP for user icebp traps + +Before we had a generic breakpoint layer, x86 used to send a +sigtrap for any debug event that happened in userspace, +except if it was caused by lazy dr7 switches. + +Currently we only send such signal for single step or breakpoint +events. + +However, there are three other kind of debug exceptions: + +- debug register access detected: trigger an exception if the + next instruction touches the debug registers. We don't use + it. +- task switch, but we don't use tss. +- icebp/int01 trap. This instruction (0xf1) is undocumented and + generates an int 1 exception. Unlike single step through TF + flag, it doesn't set the single step origin of the exception + in dr6. + +icebp then used to be reported in userspace using trap signals +but this have been incidentally broken with the new breakpoint +code. Reenable this. Since this is the only debug event that +doesn't set anything in dr6, this is all we have to check. + +This fixes a regression in Wine where World Of Warcraft got broken +as it uses this for software protection checks purposes. And +probably other apps do. + +Reported-and-tested-by: Alexandre Julliard +Signed-off-by: Frederic Weisbecker +Cc: Ingo Molnar +Cc: H. 
Peter Anvin +Cc: Thomas Gleixner +Cc: Prasad +Cc: 2.6.33.x 2.6.34.x +--- + +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 142d70c..725ef4d 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -526,6 +526,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) + dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) + { + struct task_struct *tsk = current; ++ int user_icebp = 0; + unsigned long dr6; + int si_code; + +@@ -534,6 +535,14 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) + /* Filter out all the reserved bits which are preset to 1 */ + dr6 &= ~DR6_RESERVED; + ++ /* ++ * If dr6 has no reason to give us about the origin of this trap, ++ * then it's very likely the result of an icebp/int01 trap. ++ * User wants a sigtrap for that. ++ */ ++ if (!dr6 && user_mode(regs)) ++ user_icebp = 1; ++ + /* Catch kmemcheck conditions first of all! */ + if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) + return; +@@ -575,7 +584,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) + regs->flags &= ~X86_EFLAGS_TF; + } + si_code = get_si_code(tsk->thread.debugreg6); +- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS)) ++ if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) + send_sigtrap(tsk, regs, error_code, si_code); + preempt_conditional_cli(regs); +