I want to feed data from a Spark cluster to train a deep network. My nodes have no GPUs, so distributed TensorFlow or packages like elephas are not an option.
I came up with the following generator, which does the job: it simply retrieves the next batch from Spark. To handle batching, I add an extra index column (just an incremental id) and filter on it each time the next batch is requested.
import numpy as np
import pyspark.sql.functions as sf
from pyspark.sql import Window
from tensorflow import keras as tfk


class SparkBatchGenerator(tfk.utils.Sequence):
    def __init__(self, spark_df, batch_size, sample_count=None,
                 feature_col='features', label_col='labels'):
        # Single window over the whole DataFrame (collapses to one partition)
        # to attach a 0-based incremental id; row_number() is 1-based, hence -1.
        # Each batch can then be selected with a simple range filter.
        w = Window.partitionBy(sf.lit('a')).orderBy(sf.lit('a'))
        df = spark_df.withColumn('index', sf.row_number().over(w) - 1).sort('index')
        self.X = df.select([feature_col, 'index'])
        self.y = df.select([label_col, 'index'])
        self.data_count = sample_count if sample_count else spark_df.count()
        self.feature_col = feature_col
        self.label_col = label_col
        self.batch_size = batch_size

    def __len__(self):
        # Number of batches per epoch.
        return int(np.ceil(self.data_count / self.batch_size))

    def __getitem__(self, idx):
        start, end = idx * self.batch_size, (idx + 1) * self.batch_size
        # One Spark job per batch: filter the id range, collect to pandas,
        # and unpack the Spark ML vectors into plain numpy arrays.
        batch_x = (
            self.X.filter(f'index >= {start} and index < {end}')
            .toPandas()[self.feature_col]
            .apply(lambda x: x.toArray()).tolist()
        )
        batch_y = (
            self.y.filter(f'index >= {start} and index < {end}')
            .toPandas()[self.label_col].tolist()
        )
        return np.array(batch_x), np.array(batch_y)
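For completeness, a minimal usage sketch; here model and feature_df are placeholders (a compiled Keras model and a Spark DataFrame with 'features'/'labels' columns), not part of the code above:

# Hypothetical usage of the Sequence above.
gen = SparkBatchGenerator(feature_df, batch_size=128)
model.fit(gen, epochs=5)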
This works, but it is of course slow, especially when batch_size is small. I am just wondering whether anyone has a better solution.
I ended up handling this with tf.data.Dataset. I buffer chunks of data from Spark and leave batch creation to the TensorFlow Dataset API. It is much faster now:
import pyspark.sql.functions as sf
from pyspark.sql import Window


class MyGenerator(object):
    def __init__(
        self, spark_df, buffer_size, feature_col="features", label_col="labels"
    ):
        # 0-based incremental id, used to fetch one buffer of rows at a time.
        w = Window.partitionBy(sf.lit("a")).orderBy(sf.lit("a"))
        self.df = (
            spark_df.withColumn("index", sf.row_number().over(w) - 1)
            .sort("index")
            .select([feature_col, "index", label_col])
        )
        self.feature_col = feature_col
        self.label_col = label_col
        self.buffer_size = buffer_size

    def generate_data(self):
        idx = 0
        # The first buffer is fetched below, so refills start at chunk 1
        # (starting at 0 would re-fetch and duplicate the first buffer).
        buffer_counter = 1
        buffer = self.df.filter(
            f"index >= 0 and index < {self.buffer_size}"
        ).toPandas()
        while len(buffer) > 0:
            if idx < len(buffer):
                # Yield one sample from the in-memory buffer (here: MNIST-style
                # 28x28 images, scaled to [0, 1]).
                X = buffer.iloc[idx][self.feature_col].toArray() / 255.0
                y = buffer.iloc[idx][self.label_col]
                idx += 1
                yield X.reshape((28, 28)), y
            else:
                # Buffer exhausted: fetch the next chunk from Spark.
                buffer = self.df.filter(
                    f"index >= {buffer_counter * self.buffer_size} "
                    f"and index < {(buffer_counter + 1) * self.buffer_size}"
                ).toPandas()
                idx = 0
                buffer_counter += 1
import tensorflow as tf

batch_size = 128
buffer_size = 4 * 1024

my_gen = MyGenerator(feature_df, buffer_size)
dataset = tf.data.Dataset.from_generator(
    my_gen.generate_data, output_types=(tf.float32, tf.int32)
)
# Batch first, then prefetch, so whole batches are prepared ahead of time.
# (On older TF versions use tf.data.experimental.AUTOTUNE instead.)
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(tf.data.AUTOTUNE)
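To show how the dataset plugs into training, a minimal end-to-end sketch follows; the small classifier is an illustrative placeholder for the 28x28 samples, not part of the original answer:

# Hypothetical model consuming the 28x28 samples yielded by the generator.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(dataset, epochs=5)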