Algorithm: What set of tiles of length N can be used to generate the largest number of Scrabble-valid words?

I think this is good enough!

Here is a log of my code running under PyPy:

0:00:00.000232
E
0:00:00.001251
ER
0:00:00.048733
EAT
0:00:00.208744
ESAT
0:00:00.087425
ESATL
0:00:00.132049
ESARTP
0:00:00.380296
ESARTOP
0:00:01.409129
ESIARTLP
0:00:03.433526
ESIARNTLP
0:00:10.391252
ESIARNTOLP
0:00:25.651012
ESIARNTOLDP
0:00:56.642405
ESIARNTOLCDP
0:01:57.257293
ESIARNTOLCDUP
0:03:55.933906
ESIARNTOLCDUPM
0:07:17.146036
ESIARNTOLCDUPMG
0:10:14.844347
ESIARNTOLCDUPMGH
0:13:34.722600
ESIARNTOLCDEUPMGH
0:18:14.215019
ESIARNTOLCDEUPMGSH
0:22:47.129284
ESIARNTOLCDEUPMGSHB
0:27:56.859511
ESIARNTOLCDEUPMGSHBYK
0:46:20.448502
ESIARNTOLCDEUPMGSHBYAK
0:57:15.213635
ESIARNTOLCDEUPMGSHIBYAT
1:09:55.530180
ESIARNTOLCDEUPMGSHIBYATF
1:18:35.209599
ESIARNTOLCDEUPMGSHIBYATRF
1:21:54.095119
ESIARNTOLCDEUPMGSHIBYATRFV
1:20:16.978411
ESIARNTOLCDEUPMGSHIBYAOTRFV
1:14:24.253660
ESIARNTOLCDEUPMGSHIBYAONTRFV
1:00:37.405571

The key improvements are these.

  1. I distinguish not only between letters, but also between how many times a letter has been seen. Therefore for every key I can either accept it or move on (see the sketch after this list). That was an idea I got while commenting on David Eisenstat's solution.
  2. From him I also got the idea that pruning subtrees that can't lead to an answer controls the growth of the problem surprisingly well.
  3. The very first solution that I look at is simply the top letters by frequency. That already gives a pretty good lower bound, so despite the search being depth first, we prune pretty well.
  4. I am careful to consolidate "exhausted tries" into a single record. This reduces how much data we have to carry around.
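
To illustrate point 1, here is a minimal, self-contained sketch (separate from the full program below) of how a word is turned into (letter, occurrence) keys, so that a rare second 'E' is treated as a different, less common symbol than the first 'E':

from collections import Counter

def word_to_keys(word):
    # Each key is (letter, how many times this letter has already appeared).
    seen = Counter()
    keys = []
    for letter in word.upper():
        keys.append((letter, seen[letter]))
        seen[letter] += 1
    return keys

print(word_to_keys("letter"))
# [('L', 0), ('E', 0), ('T', 0), ('T', 1), ('E', 1), ('R', 0)]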

And here is the code.

import os
import datetime
path = "enable.txt"
words = []
with open(path) as f:
    for values in f:
        words.append(values.strip().upper())

key_count = {}
for word in words:
    seen = {}
    for letter in word:
        if letter not in seen:
            seen[letter] = 0
        key = (letter, seen[letter])
        if key not in key_count:
            key_count[key] = 1
        else:
            key_count[key] += 1
        seen[letter] += 1


KEYS = sorted(key_count.keys(), key=lambda key: -key_count[key])
#print(KEYS)
#print(len(KEYS))
KEY_POS = {}
for i in range(len(KEYS)):
    KEY_POS[KEYS[i]] = i

# Now we will build a trie.  Every node has a list of words and a dictionary
# from the next key position to the child node deeper in the trie.
# BUT TRICK: we map each word to a sequence of numbers, and those numbers
# are indexes into KEYS.  This allows us to use the fact that a second 'e' is
# unlikely, so we can deal with that efficiently.
class Trie:
    def __init__(self, path):
        self.words = []        # words that end exactly at this node
        self.dict = {}         # key position -> child Trie
        self.min_pos = -1
        self.max_pos = -1
        self.count_words = 0   # words passing through or ending at this node
        self.path = path       # letters on the path from the root

    def add_word (self, word):
        trie = self

        poses = []
        seen = {}
        for letter in word:
            if letter not in seen:
                seen[letter] = 0
            key = (letter, seen[letter])
            poses.append(KEY_POS[key])
            seen[letter] += 1
        sorted_poses = sorted(poses)
        for i in range(len(sorted_poses)):
            trie.count_words += 1
            pos = sorted_poses[i]
            if pos not in trie.dict:
                trie.dict[pos] = Trie(trie.path + KEYS[pos][0])
                if trie.max_pos < pos:
                    trie.max_pos = pos
            trie = trie.dict[pos]
        trie.count_words += 1
        trie.words.append(word)


base_trie = Trie('')
for word in words:
    base_trie.add_word(word)

def best_solution (size):
    def solve (subset, pos, best, partial):
        # partial is a list of (found, upper_bound, trie) records; trie is None
        # for the consolidated record of already exhausted tries.
        found = sum(x[0] for x in partial)
        upper_bound = sum(x[1] for x in partial)
        if size <= len(subset) or upper_bound < best or len(KEYS) <= pos:
            return (found, subset)
        if best < found:
            best = found
        # Figure out our next calculations.
        partial_include = []
        partial_exclude = []
        finalized_found = 0
        for this_found, this_bound, this_trie in partial:
            if this_trie is None:
                # This is a generic record of already emptied tries
                finalized_found += this_found
            elif pos in this_trie.dict:
                include_trie = this_trie.dict[pos]
                partial_include.append((
                    this_found + len(include_trie.words),
                    include_trie.count_words + this_found,
                    include_trie
                ))
                # We included the tally of found words in the previous partial.
                # So do not double-count by including it again
                partial_include.append((
                    0,
                    this_bound - include_trie.count_words - this_found,
                    this_trie
                ))
                partial_exclude.append((
                    this_found,
                    this_bound - include_trie.count_words,
                    this_trie
                ))
            elif this_found == this_bound:
                finalized_found += this_found
            else:
                partial_include.append((
                    this_found,
                    this_bound,
                    this_trie
                ))

                partial_exclude.append((
                    this_found,
                    this_bound,
                    this_trie
                ))
        if 0 < finalized_found:
            partial_include.append(
                (finalized_found, finalized_found, None)
            )
            partial_exclude.append(
                (finalized_found, finalized_found, None)
            )

        found_include, subset_include = solve(subset + [pos], pos+1, best, partial_include)
        if best < found_include:
            best = found_include
        found_exclude, subset_exclude = solve(subset, pos+1, best, partial_exclude)
        if found_include < found_exclude:
            return (found_exclude, subset_exclude)
        else:
            return (found_include, subset_include)


    count, subset = solve([], 0, 0, [(len(base_trie.words), base_trie.count_words, base_trie)])
    return ''.join([KEYS[x][0] for x in subset])

for i in range(20):
    start = datetime.datetime.now()
    print(best_solution(i))
    print(datetime.datetime.now() - start)

This code can optimize n=15 in a couple minutes using PyPy on my laptop, finding

10701 acdegilmnoprstu.

The idea is to do branch and bound, where at each node some letters are forced to be included and others are excluded. We derive an upper bound on the quality of each node by finding an order-preserving map f (preserving the partial order of multiset inclusion) from multisets of letters to a smaller partially ordered space, then counting the number of words whose image f(word) is contained in the best f(tiles). On the smaller space we can brute-force the problem using a fast convolution method (reminiscent of FFT).

To find a good space, we greedily remove letters one at a time so as to affect as few words as possible, until the upper bound can be brute forced.
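
The "fast convolution" is the standard sum-over-subsets (zeta) transform, implemented as sum_over_subsets in the code below. As a minimal sketch of what it computes: after the transform, each entry of the vector holds the total of the original entries over all submasks of its bitmask, i.e. the number of words whose letters all fit inside that letter set.

def sum_over_subsets(vector, dimension):
    # In-place zeta transform over bitmasks of the given dimension.
    for i in range(dimension):
        bit = 1 << i
        for bitmap in range(1 << dimension):
            if not (bitmap & bit):
                vector[bitmap | bit] += vector[bitmap]

v = [0] * 8
v[0b001] = 2  # two words that use only letter 0
v[0b010] = 3  # three words that use only letter 1
v[0b011] = 1  # one word that uses letters 0 and 1
sum_over_subsets(v, 3)
print(v[0b011])  # 6: every word whose letters fit inside {0, 1}
print(v[0b111])  # 6: letter 2 adds nothing here

The full branch-and-bound program: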

import array
import collections
import functools
import heapq


def count_occurrences_of_letters(raw_word):
    occurs = collections.Counter()
    word = []
    for letter in raw_word:
        word.append(letter + str(occurs[letter]))
        occurs[letter] += 1
    return word


def greedy_censorship_order(words):
    # Repeatedly pick the letter that appears in the fewest remaining words;
    # letters late in the returned order are the most significant ones.
    hits = collections.defaultdict(set)
    for index, word in enumerate(words):
        for letter in word:
            hits[letter].add(index)
    order = []
    while hits:
        censored_letter = min(hits.keys(), key=lambda letter: len(hits[letter]))
        order.append(censored_letter)
        for index in hits[censored_letter]:
            for letter in words[index]:
                if letter != censored_letter:
                    hits[letter].remove(index)
        del hits[censored_letter]
    return order


def bitmap_from_word(word, alphabet):
    bitmap = 0
    censored = 0
    for letter in word:
        try:
            bitmap |= 1 << alphabet.index(letter)
        except ValueError:
            censored += 1
    return bitmap, censored


def sum_over_subsets(vector, dimension):
    # In-place zeta transform: vector[mask] becomes the sum of the original
    # entries over all submasks of mask.
    for i in range(dimension):
        bit = 1 << i
        for bitmap in range(1 << dimension):
            if not (bitmap & bit):
                vector[bitmap | bit] += vector[bitmap]


def count_set_bits(n):
    return bin(n).count("1")


@functools.total_ordering
class Node:
    def __init__(self, subset, n, unfiltered_words):
        self.subset = subset
        self.n = n
        self.words = [word for word in unfiltered_words if len(word) <= n]
        self.upper_bound = sum(not word for word in self.words)
        if n == 0:
            return
        order = greedy_censorship_order(self.words)
        if not order:
            self.pivot = None
            return
        self.pivot = order[-1]
        alphabet = order[-(n + 7) :]
        zeros = [0] * (1 << len(alphabet))
        vectors = [array.array("l", zeros) for i in range(n + 1)]
        for word in self.words:
            bitmap, censored = bitmap_from_word(word, alphabet)
            for i in range(censored, n + 1):
                vectors[i][bitmap] += 1
        for vector in vectors:
            sum_over_subsets(vector, len(alphabet))
        self.upper_bound = max(
            vectors[n - count_set_bits(bitmap)][bitmap]
            for bitmap in range(1 << len(alphabet))
            if count_set_bits(bitmap) <= n
        )

    def children(self):
        if self.pivot is None:
            return
        yield Node(
            self.subset, self.n, [word for word in self.words if self.pivot not in word]
        )
        yield Node(
            self.subset | {self.pivot},
            self.n - 1,
            [
                [letter for letter in word if letter != self.pivot]
                for word in self.words
            ],
        )

    def __eq__(self, other):
        return self.upper_bound == other.upper_bound

    def __ne__(self, other):
        return self.upper_bound != other.upper_bound

    def __lt__(self, other):
        # Reversed so heapq pops the node with the largest upper bound first.
        return self.upper_bound > other.upper_bound


def solve(n, words):
    heap = [Node(set(), n, words)]
    while True:
        top = heapq.heappop(heap)
        print(top.upper_bound, "".join(sorted(letter[0] for letter in top.subset)))
        if top.n == 0:
            return
        for child in top.children():
            heapq.heappush(heap, child)


def main():
    with open("enable.txt") as file:
        raw_words = file.read().split()
    words = [count_occurrences_of_letters(word) for word in raw_words]
    solve(15, words)


if __name__ == "__main__":
    main()

Here's a "dumb" sum-over-subsets that accumulates, for each count of 1 to 26, the selection of distinct letters that yields the most words in the file "enable.txt" in under 33 seconds on my laptop. (The 32 seconds is a speedup by David Eisenstat, changing my original code that ran in 6 minutes 45 seconds to an in-place method).

Since btilly and David Eisenstat have already done the difficult work of optimising a search that also allows duplicate letters, we know that the information here is useful up to 16 letters: their best 17-tile set already includes a second E, which this distinct-letters-only method cannot represent.
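
The representation is a single 26-bit mask per word, with no multiplicities: as_number (below) maps a word to the set of its letters and returns 0 for any word containing a repeated letter, so such words are simply not counted. A minimal sketch of that encoding:

def as_number(word):
    # Bitmask of the word's letters; 0 if any letter repeats.
    n = 0
    for c in word.lower():
        m = ord(c) - 97
        if n & (1 << m):
            return 0
        n |= 1 << m
    return n

print(as_number("cab"))   # 7 == 0b111, bits for a, b, c
print(as_number("deed"))  # 0 -- repeated letter, so the word is ignored

The full script: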

from collections import defaultdict

def as_number(word):
    word = word.lower()
    n = 0
    for c in word:
        m = ord(c) - 97
        if n & (1 << m):
            return 0
        else:
            n |= 1 << m
    return n

def get_letters(n):
    letters = ""
    i = 0
    while n:
        if n & 1:
            letters += chr(97 + i)
        n >>= 1
        i += 1
    return letters

def f(words, N):
    counts = defaultdict(int)

    for w in words:
        num = as_number(w)
        if num:
            counts[num] += 1

    dp = [counts.get(mask, 0) for mask in range(1 << N)]

    # In-place sum over subsets: afterwards dp[mask] counts the words whose
    # letters all lie within mask.
    for i in range(N):
        for mask in range(1 << N):
            if not (mask & (1 << i)):
                dp[mask ^ (1 << i)] += dp[mask]

    result = {}

    for i in range(1, 1 << N):
        k = bin(i).count("1")  # number of letters selected in this mask
        if k in result:
            if result[k]["best"] == dp[i]:
                result[k]["best_letters"].append(get_letters(i))
            elif result[k]["best"] < dp[i]:
                result[k]["best"] = dp[i]
                result[k]["best_letters"] = [get_letters(i)]
        elif dp[i]:
            result[k] = {
                "best": dp[i],
                "best_letters": [get_letters(i)]
            }

    return result

import os
import datetime
path = "enable.txt"
words = []
with open(path) as file:
    for values in file:
        words.append(values.strip())

start = datetime.datetime.now()
print(f(words, 26))
print(datetime.datetime.now() - start)

Output:

// ♥ pypy py.py
{
    2: {
        'best': 2,
        'best_letters': ['ab', 'de', 'ah', 'eh', 'al', 'am', 'em', 'an', 'en', 'do', 'ho', 'mo', 'no', 'er', 'is', 'os', 'at', 'it', 'mu', 'nu', 'ow', 'ay', 'oy']
    },
    3: {
        'best': 9,
        'best_letters': ['aet']
    },
    4: {
        'best': 24,
        'best_letters': ['aest']
    },
    5: {
        'best': 66,
        'best_letters': ['aelst']
    },
    6: {
        'best': 150,
        'best_letters': ['aeprst']
    },
    7: {
        'best': 283,
        'best_letters': ['aeoprst']
    },
    8: {
        'best': 543,
        'best_letters': ['aeilprst']
    },
    9: {
        'best': 945,
        'best_letters': ['aeilnprst']
    },
    10: {
        'best': 1590,
        'best_letters': ['aeilnoprst']
    },
    11: {
        'best': 2557,
        'best_letters': ['adeilnoprst']
    },
    12: {
        'best': 3855,
        'best_letters': ['acdeilnoprst']
    },
    13: {
        'best': 5648,
        'best_letters': ['acdeilnoprstu']
    },
    14: {
        'best': 8001,
        'best_letters': ['acdeilmnoprstu']
    },
    15: {
        'best': 10701,
        'best_letters': ['acdegilmnoprstu']
    },
    16: {
        'best': 14060,
        'best_letters': ['acdeghilmnoprstu']
    },
    17: {
        'best': 17225,
        'best_letters': ['abcdeghilmnoprstu']
    },
    18: {
        'best': 20696,
        'best_letters': ['abcdeghilmnoprstuy']
    },
    19: {
        'best': 23723,
        'best_letters': ['abcdeghiklmnoprstuy']
    },
    20: {
        'best': 26542,
        'best_letters': ['abcdefghiklmnoprstuy']
    },
    21: {
        'best': 29501,
        'best_letters': ['abcdefghiklmnoprstuwy']
    },
    22: {
        'best': 31717,
        'best_letters': ['abcdefghiklmnoprstuvwy']
    },
    23: {
        'best': 32675,
        'best_letters': ['abcdefghiklmnoprstuvwyz']
    },
    24: {
        'best': 33548,
        'best_letters': ['abcdefghiklmnoprstuvwxyz']
    },
    25: {
        'best': 34299,
        'best_letters': ['abcdefghijklmnoprstuvwxyz']
    },
    26: {
        'best': 34816,
        'best_letters': ['abcdefghijklmnopqrstuvwxyz']
    }
}
0:00:32.295888